prompt (string, 15-655k chars) | completion (string, 3-32.4k chars) | api (string, 8-52 chars)
---|---|---|
import os
import numpy as np
from scipy.io import loadmat
import torch
from torch.utils.data import TensorDataset
import random
# challenge 2020
def ChallengeData(label_dir, data_dir, weights_file):
print('Loading data...')
normal_class = '426783006'
equivalent_classes = [['713427006', '59118001'], ['284470004', '63593006'], ['427172004', '17338001']]
# Find the label files.
print('Finding label...')
label_files = load_label_files(label_dir)
# Load the labels and classes.
print('Loading labels...')
classes, labels_onehot, labels = load_labels(label_files, normal_class, equivalent_classes)
# Load the weights for the Challenge metric.
print('Loading weights...')
weights = load_weights(weights_file, classes)
# Classes that are scored with the Challenge metric.
indices = np.any(weights, axis=0) # Find indices of classes in weight matrix.
# from scipy.io import savemat
# savemat('evaluation/scored_classes_indices.mat', {'val': indices})
# Load short signals and remove from labels
short_signals = loadmat(os.path.join(data_dir, 'short_signals.mat'))['val']
short_signals_ids = list(short_signals.reshape((short_signals.shape[1],)))
num_files = len(label_files)
recordings = list()
labels_onehot_new = list()
labels_new = list()
bb = []
dd = []
for i in range(num_files):
if i in short_signals_ids:
recording = np.zeros((1, 12, 3000))
else:
recording, header = load_challenge_data(label_files[i], data_dir)
recording[np.isnan(recording)] = 0
recordings.append(recording)
rr = np.array(recording)
if np.isnan(rr).any():
print(i)
bb.append(i)
dd.append(rr)
labels_onehot_new.append(labels_onehot[i])
labels_new.append(labels[i])
for i in range(len(recordings)):
if np.isnan(recordings[i]).any():
print(i)
    # flatten the windowed ("slided") segments of each recording
recordings_all = list()
labels_onehot_all = list()
labels_all = list()
for i in range(len(recordings)):
for j in range(recordings[i].shape[0]):
recordings_all.append(recordings[i][j])
labels_onehot_all.append(labels_onehot_new[i])
labels_all.append(labels_new[i])
recordings_all = np.array(recordings_all)
labels_onehot_all = np.array(labels_onehot_all)
recordings_preprocessed, labels_onehot = preprocessing(recordings_all, labels_onehot_all)
recordings_augmented, labels_onehot = augmentation(recordings_preprocessed, labels_onehot_all)
print(np.isnan(recordings_augmented).any())
num = recordings_augmented.shape[0]
c = []
a = []
for i in range(num):
if np.isnan(recordings_augmented[i]).any():
print(' {}/{}'.format(i, num))
c.append(i)
a.append(recordings_augmented[i])
print(c)
print(a)
# Get number of samples for each category
count = np.sum(labels_onehot, axis=0)
    # indices of the scored classes were already computed above from the weight matrix
X = torch.from_numpy(recordings_augmented).float()
# Y = torch.from_numpy(labels_onehot)
Y = torch.from_numpy(labels_onehot).float()
dataset = TensorDataset(X, Y)
return dataset, indices
def preprocessing(recordings, labels):
return recordings, labels
def augmentation(recordings, labels):
return recordings, labels
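# The two hooks above are identity stubs. A minimal sketch of what an augmentation step
# could look like (hypothetical, not part of the original pipeline), assuming recordings
# are shaped (N, 12, T): add small Gaussian noise to every lead.
def augmentation_noise_example(recordings, labels, sigma=0.01):
    noisy = recordings + sigma * np.random.randn(*recordings.shape)
    return noisy, labels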
def is_number(x):
try:
float(x)
return True
except ValueError:
return False
# Find Challenge files.
def load_label_files(label_directory):
label_files = list()
for f in sorted(os.listdir(label_directory)):
F = os.path.join(label_directory, f) # Full path for label file
if os.path.isfile(F) and F.lower().endswith('.hea') and not f.lower().startswith('.'):
# root, ext = os.path.splitext(f)
label_files.append(F)
if label_files:
return label_files
else:
raise IOError('No label or output files found.')
# Load labels from header/label files.
def load_labels(label_files, normal_class, equivalent_classes_collection):
# The labels_onehot should have the following form:
#
# Dx: label_1, label_2, label_3
#
num_recordings = len(label_files)
# Load diagnoses.
tmp_labels = list()
for i in range(num_recordings):
with open(label_files[i], 'r') as f:
for l in f:
if l.startswith('#Dx'):
dxs = set(arr.strip() for arr in l.split(': ')[1].split(','))
tmp_labels.append(dxs)
# Identify classes.
classes = set.union(*map(set, tmp_labels))
if normal_class not in classes:
classes.add(normal_class)
print('- The normal class {} is not one of the label classes, so it has been automatically added, but please check that you chose the correct normal class.'.format(normal_class))
classes = sorted(classes)
num_classes = len(classes)
# Use one-hot encoding for labels.
    labels_onehot = np.zeros((num_recordings, num_classes), dtype=bool)  # np.bool is a deprecated alias for the builtin bool
for i in range(num_recordings):
dxs = tmp_labels[i]
for dx in dxs:
j = classes.index(dx)
labels_onehot[i, j] = 1
    # For each set of equivalent classes, use only one class as the representative class for the set and discard the other classes in the set.
# The label for the representative class is positive if any of the labels_onehot in the set is positive.
remove_classes = list()
remove_indices = list()
for equivalent_classes in equivalent_classes_collection:
equivalent_classes = [x for x in equivalent_classes if x in classes]
if len(equivalent_classes)>1:
representative_class = equivalent_classes[0]
other_classes = equivalent_classes[1:]
equivalent_indices = [classes.index(x) for x in equivalent_classes]
representative_index = equivalent_indices[0]
other_indices = equivalent_indices[1:]
labels_onehot[:, representative_index] = np.any(labels_onehot[:, equivalent_indices], axis=1)
remove_classes += other_classes
remove_indices += other_indices
for x in remove_classes:
classes.remove(x)
labels_onehot = np.delete(labels_onehot, remove_indices, axis=1)
# If the labels_onehot are negative for all classes, then change the label for the normal class to positive.
normal_index = classes.index(normal_class)
for i in range(num_recordings):
num_positive_classes = np.sum(labels_onehot[i, :])
if num_positive_classes==0:
labels_onehot[i, normal_index] = 1
labels = list()
for i in range(num_recordings):
class_list = []
for j in range(len(classes)):
            if labels_onehot[i, j]:
class_list.append(classes[j])
class_set = set()
class_set.update(class_list)
labels.append(class_set)
return classes, labels_onehot, labels
# Load challenge data.
def load_challenge_data(label_file, data_dir):
file = os.path.basename(label_file)
with open(label_file, 'r') as f:
header = f.readlines()
mat_file = file.replace('.hea', '.mat')
x = loadmat(os.path.join(data_dir, mat_file))
recording = np.asarray(x['val'], dtype=np.float64)
return recording, header
# Load weights.
def load_weights(weight_file, classes):
# Load the weight matrix.
rows, cols, values = load_table(weight_file)
assert(rows == cols)
num_rows = len(rows)
# Assign the entries of the weight matrix with rows and columns corresponding to the classes.
num_classes = len(classes)
weights = np.zeros((num_classes, num_classes), dtype=np.float64)
for i, a in enumerate(rows):
if a in classes:
k = classes.index(a)
for j, b in enumerate(rows):
if b in classes:
l = classes.index(b)
weights[k, l] = values[i, j]
return weights
# Load table.
def load_table(table_file):
# The table should have the following form:
#
# , a, b, c
# a, 1.2, 2.3, 3.4
# b, 4.5, 5.6, 6.7
# c, 7.8, 8.9, 9.0
#
table = list()
print(os.getcwd())
with open(table_file, 'r') as f:
for i, l in enumerate(f):
arrs = [arr.strip() for arr in l.split(',')]
table.append(arrs)
# Define the numbers of rows and columns and check for errors.
num_rows = len(table)-1
if num_rows<1:
raise Exception('The table {} is empty.'.format(table_file))
num_cols = set(len(table[i])-1 for i in range(num_rows))
if len(num_cols)!=1:
raise Exception('The table {} has rows with different lengths.'.format(table_file))
num_cols = min(num_cols)
if num_cols<1:
raise Exception('The table {} is empty.'.format(table_file))
# Find the row and column labels.
rows = [table[0][j+1] for j in range(num_rows)]
cols = [table[i+1][0] for i in range(num_cols)]
# Find the entries of the table.
values = np.zeros((num_rows, num_cols))
for i in range(num_rows):
for j in range(num_cols):
value = table[i+1][j+1]
if is_number(value):
values[i, j] = float(value)
else:
values[i, j] = float('nan')
return rows, cols, values
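# Usage sketch (hypothetical path): the weight file is a square CSV whose first row and
# first column carry the class codes, as in the layout comment above.
# rows, cols, values = load_table('evaluation/weights.csv')
# assert rows == cols
# weights = load_weights('evaluation/weights.csv', classes)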
# Challenge2020 official evaluation
class ChallengeMetric():
def __init__(self, input_directory, weights_file):
# challengeMetric initialization
normal_class = '426783006'
equivalent_classes = [['713427006', '59118001'], ['284470004', '63593006'], ['427172004', '17338001']]
# Find the label files.
print('Finding label...')
label_files = load_label_files(input_directory)
# Load the labels and classes.
print('Loading labels...')
classes, labels_onehot, labels = load_labels(label_files, normal_class, equivalent_classes)
num_files = len(label_files)
print("num_files:", num_files)
# Load the weights for the Challenge metric.
print('Loading weights...')
weights = load_weights(weights_file, classes)
# Only consider classes that are scored with the Challenge metric.
indices = np.any(weights, axis=0) # Find indices of classes in weight matrix.
classes = [x for i, x in enumerate(classes) if indices[i]]
weights = weights[np.ix_(indices, indices)]
self.weights = weights
self.indices = indices
self.classes = classes
self.normal_class = normal_class
# Compute recording-wise accuracy.
def accuracy(self, outputs, labels):
outputs = self.get_pred(outputs)
outputs = outputs[:, self.indices]
labels = labels[:, self.indices]
num_recordings, num_classes = np.shape(labels)
num_correct_recordings = 0
for i in range(num_recordings):
if np.all(labels[i, :] == outputs[i, :]):
num_correct_recordings += 1
return float(num_correct_recordings) / float(num_recordings)
# Compute confusion matrices.
def confusion_matrices(self, outputs, labels, normalize=False):
# Compute a binary confusion matrix for each class k:
#
# [TN_k FN_k]
# [FP_k TP_k]
#
# If the normalize variable is set to true, then normalize the contributions
# to the confusion matrix by the number of labels per recording.
num_recordings, num_classes = np.shape(labels)
if not normalize:
A = np.zeros((num_classes, 2, 2))
for i in range(num_recordings):
for j in range(num_classes):
if labels[i, j] == 1 and outputs[i, j] == 1: # TP
A[j, 1, 1] += 1
elif labels[i, j] == 0 and outputs[i, j] == 1: # FP
A[j, 1, 0] += 1
elif labels[i, j] == 1 and outputs[i, j] == 0: # FN
A[j, 0, 1] += 1
elif labels[i, j] == 0 and outputs[i, j] == 0: # TN
A[j, 0, 0] += 1
else: # This condition should not happen.
raise ValueError('Error in computing the confusion matrix.')
else:
A = np.zeros((num_classes, 2, 2))
for i in range(num_recordings):
normalization = float(max(np.sum(labels[i, :]), 1))
for j in range(num_classes):
if labels[i, j] == 1 and outputs[i, j] == 1: # TP
A[j, 1, 1] += 1.0 / normalization
elif labels[i, j] == 0 and outputs[i, j] == 1: # FP
A[j, 1, 0] += 1.0 / normalization
elif labels[i, j] == 1 and outputs[i, j] == 0: # FN
A[j, 0, 1] += 1.0 / normalization
elif labels[i, j] == 0 and outputs[i, j] == 0: # TN
A[j, 0, 0] += 1.0 / normalization
else: # This condition should not happen.
raise ValueError('Error in computing the confusion matrix.')
return A
# Compute macro F-measure.
def f_measure(self, outputs, labels):
outputs = self.get_pred(outputs)
outputs = outputs[:, self.indices]
labels = labels[:, self.indices]
num_recordings, num_classes = np.shape(labels)
A = self.confusion_matrices(outputs, labels)
f_measure = np.zeros(num_classes)
for k in range(num_classes):
tp, fp, fn, tn = A[k, 1, 1], A[k, 1, 0], A[k, 0, 1], A[k, 0, 0]
if 2 * tp + fp + fn:
f_measure[k] = float(2 * tp) / float(2 * tp + fp + fn)
else:
f_measure[k] = float('nan')
macro_f_measure = np.nanmean(f_measure)
return macro_f_measure
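    # Worked micro-example (illustrative numbers): for one class with tp=3, fp=1, fn=2 the
    # score is 2*3 / (2*3 + 1 + 2) = 6/9 ~ 0.667; the macro F-measure is the nanmean of the
    # per-class scores, so classes that never occur (NaN) are ignored.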
# Compute F-beta and G-beta measures from the unofficial phase of the Challenge.
def macro_f_beta_measure(self, outputs, labels, beta=2):
return self.beta_measures(outputs, labels, beta)[0]
def macro_g_beta_measure(self, outputs, labels, beta=2):
return self.beta_measures(outputs, labels, beta)[1]
def beta_measures(self, outputs, labels, beta=2):
outputs = self.get_pred(outputs)
outputs = outputs[:, self.indices]
labels = labels[:, self.indices]
num_recordings, num_classes = np.shape(labels)
A = self.confusion_matrices(outputs, labels, normalize=True)
f_beta_measure = np.zeros(num_classes)
g_beta_measure = np.zeros(num_classes)
for k in range(num_classes):
tp, fp, fn, tn = A[k, 1, 1], A[k, 1, 0], A[k, 0, 1], A[k, 0, 0]
if (1 + beta ** 2) * tp + fp + beta ** 2 * fn:
f_beta_measure[k] = float((1 + beta ** 2) * tp) / float((1 + beta ** 2) * tp + fp + beta ** 2 * fn)
else:
f_beta_measure[k] = float('nan')
if tp + fp + beta * fn:
g_beta_measure[k] = float(tp) / float(tp + fp + beta * fn)
else:
g_beta_measure[k] = float('nan')
macro_f_beta_measure = np.nanmean(f_beta_measure)
macro_g_beta_measure =
|
np.nanmean(g_beta_measure)
|
numpy.nanmean
|
#!/usr/bin/env python
# coding: utf-8
# In[8]:
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
def mean_df(df: pd.DataFrame) -> pd.DataFrame:
return df.groupby(level=0).mean()
df = pd.read_hdf('/Users/bailujia/Downloads/train.h5', index_col='id')
# In[9]:
folds = pd.read_csv('/Users/bailujia/Downloads/folds.csv')
validset = folds.loc[folds['fold'] != 0]
validset.index = range(validset.shape[0])
df = df.reindex(validset['id'])
df = mean_df(df)
# In[10]:
import numpy as np
def get_target():
target = np.zeros((validset.shape[0],1103))
index = validset['attribute_ids'].str.split()
for i in range(validset.shape[0]):
label = np.array([int(cls) for cls in index[i]])
target[i,label] = 1
return target
# In[12]:
target = get_target()
# In[13]:
#class 0
target[:,0]
# In[14]:
X= np.array(df)
# In[15]:
print(X.shape)
# In[16]:
from collections import defaultdict, Counter
import random
import pandas as pd
import tqdm
# In[17]:
def make_folds(df, n_folds: int) -> pd.DataFrame:
cls_counts = Counter(cls for classes in df['attribute_ids'].str.split()
for cls in classes)
fold_cls_counts = defaultdict(int)
folds = [-1] * len(df)
for item in tqdm.tqdm(df.sample(frac=1, random_state=42).itertuples(),
total=len(df)):
cls = min(item.attribute_ids.split(), key=lambda cls: cls_counts[cls])
fold_counts = [(f, fold_cls_counts[f, cls]) for f in range(n_folds)]
min_count = min([count for _, count in fold_counts])
random.seed(item.Index)
fold = random.choice([f for f, count in fold_counts
if count == min_count])
folds[item.Index] = fold
for cls in item.attribute_ids.split():
fold_cls_counts[fold, cls] += 1
df['fold'] = folds
return df
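# Fold-assignment sketch: each sample is keyed by its rarest attribute class and routed to
# the fold where that class is currently least represented (ties broken with a seeded
# random choice), which keeps rare classes roughly balanced across the n_folds splits.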
# In[18]:
kf = make_folds(validset,5)
# In[40]:
X = np.array(df)
# In[47]:
np.array(ind)
# In[46]:
ind = []
for k in range(1103):
if np.sum(target[:,k] ==0)*np.sum(target[:,k] ==1)>0:
if np.max(X[target[:,k] ==0 ,k]) < np.min(X[target[:,k] ==1 ,k]):
if np.min(X[target[:,k] ==1 ,k])>0.9:
if X[target[:,k] ==1,k].shape[0] > 5:
ind.append(k)
print(k,np.max(X[target[:,k] ==0 ,k]), np.min(X[target[:,k] ==1 ,k]))
# In[34]:
np.array(ind)
# In[45]:
ind = []
for k in range(1103):
if np.sum(target[:,k] ==0)*np.sum(target[:,k] ==1)>0:
if np.max(X[target[:,k] ==0 ,k]) < np.min(X[target[:,k] ==1 ,k]):
if 0.9>= np.min(X[target[:,k] ==1 ,k])>0.8:
if X[target[:,k] ==1,k].shape[0] > 5:
ind.append(k)
print(k,np.max(X[target[:,k] ==0 ,k]), np.min(X[target[:,k] ==1 ,k]))
# In[44]:
np.array(ind)
# In[43]:
ind = []
for k in range(1103):
if np.sum(target[:,k] ==0)*np.sum(target[:,k] ==1)>0:
if np.max(X[target[:,k] ==0 ,k]) < np.min(X[target[:,k] ==1 ,k]):
if 0.8>= np.min(X[target[:,k] ==1 ,k])>0.7:
if X[target[:,k] ==1,k].shape[0] > 5:
ind.append(k)
print(k,np.max(X[target[:,k] ==0 ,k]), np.min(X[target[:,k] ==1 ,k]))
# In[ ]:
np.array(ind)
# In[50]:
np.linspace(0.2,0.7,6)
# In[55]:
for th in np.linspace(0.2,0.7,6):
print(th)
ind = []
for k in range(1103):
if np.sum(target[:,k] ==0)*np.sum(target[:,k] ==1)>0:
if np.max(X[target[:,k] ==0 ,k]) < np.min(X[target[:,k] ==1 ,k]):
if (0.1+th)>=
|
np.min(X[target[:,k] ==1 ,k])
|
numpy.min
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functions to create the connection table in view to enforce symmetry in the
topological optimization
<NAME> EPFL 2018
"""
# System libs
import os
import json
import argparse
import importlib
import sys
# Third party libs
import numpy as np
import scipy.misc
import scipy
def index_to_position(index, nelx, nely):
    """
    Convert the index of an element to the centroid of the element
    """
    pos = np.array([(index // nely) + .5, (index % nely) + .5])  # integer division keeps the column index an integer under Python 3
    return pos
def position_to_index(pos, nelx, nely):
"""
Convert a position vector to the index of the element containing it
"""
index = int(pos[0])*nely + int(pos[1])
return index
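# Round-trip sketch on a 4x3 grid (nelx=4, nely=3): element 5 lies in column 5 // 3 = 1
# and row 5 % 3 = 2, so index_to_position(5, 4, 3) gives [1.5, 2.5] and
# position_to_index([1.5, 2.5], 4, 3) returns 5.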
def add_symmetry_planes(a_array, c_array, a, c, empty = 0):
"""
Use to add a new symmetry plane to the problem
"""
if len(a_array)==0 or len(c_array)==0 :
a_array = [np.array(a/np.sqrt(np.dot(a,a)))]
c_array = [np.array(c)]
else :
a = a/np.sqrt(np.dot(a,a))
a_array = np.append(a_array,[a],0)
c_array = np.append(c_array,[c],0)
return a_array, c_array
def get_symmetry_image(a_array, c_array, nelx, nely):
"""
Create an image with line showing the symmetry planes
"""
image = np.ones(nelx*nely)
Xmin = index_to_position(0, nelx, nely)
Xmax = index_to_position(nelx*nely-1, nelx, nely)
rmax = np.sqrt(np.dot(Xmax-Xmin,Xmax-Xmin)) #Length scale of mesh
for i in range(nelx*nely):
X_i = index_to_position(i, nelx, nely)
        if in_domain(X_i, a_array, c_array) == 0:
            image[i] = .5
        if in_domain(X_i, a_array, c_array) == 2:
            image[i] = 0
return (image.reshape(nely,nelx))
def in_domain(X, a_array, c_array):
"""
    Check whether a given point is inside or outside the design domain
"""
flag = 1
for n in range(np.shape(a_array)[0]):
a = a_array[n]
c = c_array[n]
dist = np.dot(X-c,a)
if(dist > 0.7):
flag = 0
if(abs(dist)<0.7):
flag = 2
return flag
def get_symmetric_element(index, a, c, nelx, nely):
"""
Return the index of the symmetric element w.r.t. a plane (a,c)
"""
x_i = index_to_position(index,nelx,nely)
dist = np.dot(x_i-c,a)
x_proj = x_i - dist * a
x_sym = x_proj - dist * a
index_sym = position_to_index(x_sym,nelx,nely)
return index_sym
def construct_connection_table(a_array, c_array, nelx, nely):
"""
Simple algorithm O(nelx*nely) to construct the table containing
for an element i, outside the design domain, its symmetric
element j inside the design domain. Symmetry planes variant.
"""
connection_table = np.array(range(nelx*nely))
for n in range(np.shape(a_array)[0]):
a = a_array[n]
c = c_array[n]
for i in range(nelx*nely):
X_i = index_to_position(i,nelx,nely)
index_sym = connection_table[i]
if in_domain(X_i, [a], [c])!=1:
index_sym = connection_table[get_symmetric_element(i,a,c,nelx,nely)]
connection_table[i]=index_sym
# print(connection_table.reshape(nelx,nely))
# sys.exit('Stop')
return connection_table
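# Usage sketch (illustrative values): mirror a 40x40 design about the vertical mid-plane.
# a_array, c_array = add_symmetry_planes([], [], np.array([1., 0.]), np.array([20., 20.]))
# table = construct_connection_table(a_array, c_array, 40, 40)
# Every element on the far side of the plane then maps to the index of its mirror element.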
def construct_mapping_vector_wheel(Nsector, nelx, nely):
"""
Simple algorithm O(nelx*nely) to construct the table containing
for an element i, outside the design domain, its symmetric
element j inside the design domain. Symmetry sectors variant.
"""
mapping_vector = [[i] for i in range(nelx*nely)]
phi = 2*np.pi/Nsector
    c = np.array([nelx/2, nely/2])  # Center of the circle
for i in range(nelx*nely):
X_i = index_to_position(i,nelx,nely) - c #All positions are centered to the circle
r_i = np.sqrt(np.dot(X_i,X_i))
if r_i <= nelx/2 :
phi_i = np.arctan2(X_i[1],X_i[0])
if phi_i < phi/2 :
for j in range(Nsector):
theta = phi_i + phi*j
#Symmetry with others sectors
if j > 0 :
x_sym = r_i*np.cos(theta)
y_sym = r_i*np.sin(theta)
sym_pos = np.array([x_sym,y_sym]) + c
sym_idx = position_to_index(sym_pos,nelx,nely)
mapping_vector[i].append(sym_idx)
#Middle sector symmetry
x_sym = r_i*np.cos(theta + phi - 2 * phi_i)
y_sym = r_i*np.sin(theta + phi - 2 * phi_i)
sym_pos = np.array([x_sym,y_sym]) + c
sym_idx = position_to_index(sym_pos,nelx,nely)
mapping_vector[i].append(sym_idx)
#print(np.array(mapping_vector).reshape(nelx,nely))
#sys.exit('Stop')
return mapping_vector
def construct_mapping_vector_rockingchair(Nsector, nelx, nely):
"""
Simple algorithm O(nelx*nely) to construct the table containing
for an element i, outside the design domain, its symmetric
element j inside the design domain. Symmetry sectors variant.
"""
if Nsector%2 == 0:
        sys.exit('ABORT: Please put an odd number of sectors in the json file')
mapping_vector = [[i] for i in range(nelx*nely)]
phi = np.pi/Nsector
    c = np.array([nelx/2, nely/2])  # Center of the circle
for i in range(nelx*nely):
X_i = index_to_position(i,nelx,nely) - c #All positions are centered to the circle
r_i = np.sqrt(np.dot(X_i,X_i))
if r_i <= nelx/2 :
phi_i = np.arctan2(X_i[1],X_i[0])
if phi_i > 0 and phi_i < phi:
for j in range(Nsector):
theta = phi_i + phi*j
#Symmetry with others sectors
if j > 0 :
x_sym = r_i*np.cos(theta)
y_sym = r_i*np.sin(theta)
sym_pos = np.array([x_sym,y_sym]) + c
sym_idx = position_to_index(sym_pos,nelx,nely)
mapping_vector[i].append(sym_idx)
#middle sector symmetry
x_sym = r_i*np.cos(theta + phi - 2 * phi_i)
y_sym = r_i*np.sin(theta + phi - 2 * phi_i)
sym_pos =
|
np.array([x_sym,y_sym])
|
numpy.array
|
import numpy
def calc_extinction_sphere_primary(
f_sq, radius, volume_unit_cell, cos_2theta, wavelength,
flag_f_sq: bool = False, flag_radius: bool = False,
flag_volume_unit_cell: bool = False, flag_cos_2theta: bool = False,
flag_wavelength: bool = False):
"""Primary extinction for the sphere (Acta.Cryst.(1974), A30, 129)
"""
x = 1.5 * f_sq * numpy.square(wavelength*radius/volume_unit_cell)
A = 0.20 + 0.45 * cos_2theta
B = 0.22 - 0.12 * (0.5-cos_2theta)**2
x_sq = numpy.square(x)
y_p = 1./numpy.sqrt(1 + 2 * x + A * x_sq / (1.+ B*x))
dder = {}
der_yp_x = -0.5*numpy.power(y_p, 3)*(
2. + 2.*A*x / (1.+B*x) - A*B*x_sq / numpy.square(1.+ B*x))
if flag_f_sq:
ones_f_sq = numpy.ones_like(f_sq)
dder["f_sq"] = der_yp_x * 1.5 * numpy.square(
wavelength*radius/volume_unit_cell)*ones_f_sq
if flag_radius:
ones_radius = numpy.ones_like(radius)
dder["radius"] = der_yp_x * 3.0 * f_sq * radius*numpy.square(
wavelength/volume_unit_cell)
if flag_volume_unit_cell:
ones_volume_unit_cell = numpy.ones_like(volume_unit_cell)
dder["volume_unit_cell"] = -3.0 * der_yp_x * f_sq * numpy.square(
wavelength*radius/volume_unit_cell) * \
ones_volume_unit_cell / volume_unit_cell
pass
if flag_cos_2theta:
ones_cos_2theta = numpy.ones_like(cos_2theta)
der_A_cos_2theta = 0.45
der_B_cos_2theta = 0.24 * (0.5-cos_2theta)
der_yp_A = -0.5*numpy.power(y_p, 3)*x_sq / (1.+ B*x)
der_yp_B = 0.5*numpy.power(y_p, 3)*A * x_sq*x / numpy.square(1.+ B*x)
dder["cos_2theta"] = (der_yp_A*der_A_cos_2theta +
der_yp_B*der_B_cos_2theta) * ones_cos_2theta
if flag_wavelength:
ones_wavelength = numpy.ones_like(wavelength)
dder["wavelength"] = der_yp_x * 3.0 * f_sq * wavelength*numpy.square(
radius/volume_unit_cell) * ones_wavelength
return y_p, dder
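# Usage sketch with scalar inputs (illustrative numbers only): y_p is the primary
# extinction correction factor and dder collects the derivatives requested via the
# flag_* arguments.
# y_p, dder = calc_extinction_sphere_primary(
#     f_sq=100., radius=1e-4, volume_unit_cell=1000., cos_2theta=0.5,
#     wavelength=1.4, flag_f_sq=True)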
def calc_extinction_sphere_secondary_gauss(
f_sq, radius, mosaicity, volume_unit_cell, cos_2theta, wavelength,
flag_f_sq: bool = False, flag_radius: bool = False,
flag_mosaicity: bool = False,
flag_volume_unit_cell: bool = False, flag_cos_2theta: bool = False,
flag_wavelength: bool = False):
"""Secondary Gauss extinction for the sphere (Acta.Cryst.(1974), A30, 129)
    mosaicity is given in arc minutes
"""
mosaicity_rad = mosaicity*numpy.pi/(180*60) #transfer minutes to radians
if not(radius*mosaicity_rad>0):
term_1 = numpy.zeros_like(cos_2theta)
x = numpy.zeros_like(term_1)
else:
term_1 = mosaicity_rad / numpy.sqrt(numpy.square(mosaicity_rad * wavelength) +
9./8. * numpy.square(radius) *
(1.- numpy.square(cos_2theta)))
x = 1.5 * f_sq * wavelength * \
|
numpy.square(wavelength*radius/volume_unit_cell)
|
numpy.square
|
import logging, numpy as np, scipy.stats
from annoy import AnnoyIndex
from sklearn.decomposition import TruncatedSVD
from libact.base.dataset import ensure_sklearn_compat
logger = logging.getLogger('actleto')
class ADWeS(object):
def __init__(self,
dataset,
basic_strategy,
svd_components=300,
index_trees=10,
get_nearest_n=10,
get_most_uncertain_n=0,
exp_rel_power=0.8,
exp_rel_rate=1.0,
uncertainty_factor=0.5,
us_method='lc',
plot_each=20):
self.dataset = dataset
self.basic_strategy = basic_strategy
self.get_nearest_n = get_nearest_n
self.get_most_uncertain_n = get_most_uncertain_n
self.exp_rel_power = exp_rel_power
self.exp_rel_rate = exp_rel_rate
self.uncertainty_factor = uncertainty_factor
self.us_method = us_method
self.plot_each = plot_each
self.index = AnnoyIndex(svd_components)
        all_features = ensure_sklearn_compat(list(zip(*dataset.data))[0])  # list() is needed under Python 3, where zip() is lazy
self.data = TruncatedSVD(n_components=svd_components).fit_transform(all_features)
for i, item in enumerate(self.data):
self.index.add_item(i, item)
self.index.build(index_trees)
self.labeled_ids = set() # will be updated in make_query before all job
# calculate mean and maximum distances
self.explore_relevance = []
self.explore_relevance_max = 0
for i in range(self.data.shape[0]):
cur_dist = self.index.get_nns_by_item(i,
self.get_nearest_n,
include_distances=True)[1]
if len(cur_dist) > 0:
cur_mean = np.mean(cur_dist)
cur_max_dist = np.max(cur_dist)
if cur_max_dist > self.explore_relevance_max:
self.explore_relevance_max = cur_max_dist
else:
cur_mean = np.nan
self.explore_relevance.append(cur_mean)
self.explore_relevance = np.array(self.explore_relevance)
# fill na
samples_without_neighbors = np.isnan(self.explore_relevance)
self.explore_relevance[samples_without_neighbors] = self.explore_relevance_max
# normalize
logger.debug('init dist %s' % str(scipy.stats.describe(self.explore_relevance)))
self.explore_relevance = ((self.explore_relevance - self.explore_relevance.min()) /
(self.explore_relevance.max() - self.explore_relevance.min()))
self.explore_relevance = (1 - self.explore_relevance) ** self.exp_rel_power
self.iter_i = 0
def make_query(self, return_score=False):
self._update_exp_rel()
self.model.train(self.dataset)
unlabeled_entry_ids, X_pool = list(zip(*self.dataset.get_unlabeled_entries()))
unlabeled_entry_ids = np.asarray(unlabeled_entry_ids)
X_pool = ensure_sklearn_compat(X_pool)
        _, ids_with_scores = self.basic_strategy.make_query(return_score=True)  # the attribute set in __init__ is 'basic_strategy'
        unlabeled_entry_ids, base_score = zip(*ids_with_scores)
        unlabeled_entry_ids = np.asarray(unlabeled_entry_ids)
        base_score = np.asarray(base_score, dtype=float)  # zip() yields tuples, which do not support the arithmetic below
        # normalize: we don't care about absolute values, only relative values to rank samples
#base_score = base_score - base_score.mean()
#base_score /= base_score.std()
base_score = base_score - base_score.min()
base_score /= base_score.max()
if self.get_most_uncertain_n > 0:
most_base_relevant_indices = np.argpartition(-base_score, self.get_most_uncertain_n)[:self.get_most_uncertain_n]
else:
most_base_relevant_indices = list(range(len(base_score)))
most_base_relevant_ids = unlabeled_entry_ids[most_base_relevant_indices]
most_base_relevant_score = base_score[most_base_relevant_indices]
logger.debug('most base relevant score %s' % str(scipy.stats.describe(most_base_relevant_score)))
most_base_relevant_exp_rel = self.explore_relevance[most_base_relevant_ids]
        # normalize: we don't care about absolute values, only relative values to rank samples
#most_uncertain_exp_rel = most_uncertain_exp_rel - most_uncertain_exp_rel.mean()
#most_uncertain_exp_rel /= most_uncertain_exp_rel.std()
most_base_relevant_exp_rel = most_base_relevant_exp_rel - most_base_relevant_exp_rel.min()
most_base_relevant_exp_rel /= most_base_relevant_exp_rel.max()
logger.debug('most exp rel %s' % str(scipy.stats.describe(most_base_relevant_exp_rel)))
# f-beta
result_score = ((1 + self.uncertainty_factor ** 2) * most_base_relevant_score * most_base_relevant_exp_rel /
((self.uncertainty_factor ** 2) * most_base_relevant_score + most_base_relevant_exp_rel))
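        # The line above is an F-beta-style harmonic mean of the base (uncertainty) score u
        # and the exploration relevance e, with uncertainty_factor playing the role of beta:
        # score = (1 + b**2) * u * e / (b**2 * u + e).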
#result_score = (self.uncertainty_factor * most_uncertain_uncert_score
# + (1 - self.uncertainty_factor) * most_uncertain_exp_rel)
result_score[np.isnan(result_score)] = 0.0
logger.debug('most res %s' % str(scipy.stats.describe(result_score)))
# if self.iter_i % self.plot_each == 0:
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots()
# fig.set_size_inches((9, 6))
# ax.hist(most_base_relevant_score, label='uncert')
# ax.hist(most_base_relevant_exp_rel, label='exp_rel')
# ax.hist(result_score, label='res')
# fig.savefig('./debug/%05d_hist.png' % self.iter_i)
# plt.close(fig)
#
# _, ax = plot_samples(np.array([most_base_relevant_score,
# most_base_relevant_exp_rel]).T,
# result_score,
# with_kde=False,
# filename='./debug/%05d_scores.png' % self.iter_i,
# do_not_display=True)
# ax.set_xlabel('uncert')
# ax.set_ylabel('exp_rel')
best_i = np.argmax(result_score)
best_id = most_base_relevant_ids[best_i]
logger.debug('best %r %r %r %r' % (best_i,
result_score[best_i],
most_base_relevant_score[best_i],
most_base_relevant_exp_rel[best_i]))
if return_score:
return best_id, \
list(zip(most_base_relevant_ids, result_score))
else:
return best_id
def _update_exp_rel(self):
data = self.dataset.data
        newly_labeled_ids = {i for i in range(len(data))
                             if data[i][1] is not None
                             and i not in self.labeled_ids}
self.labeled_ids.update(newly_labeled_ids)
for ex_id in newly_labeled_ids:
neighbor_ids, neighbor_dist = self.index.get_nns_by_item(ex_id,
self.get_nearest_n,
include_distances=True)
neighbor_dist = np.asarray(neighbor_dist, dtype='float')
neighbor_discount_factor = (1 - neighbor_dist / self.explore_relevance_max) ** self.exp_rel_power
neighbor_discount_factor= 1 - self.exp_rel_rate * neighbor_discount_factor
#logger.debug('dist: %s' % neighbor_dist)
#logger.debug('factor: %s' % neighbor_discount_factor)
assert np.count_nonzero(
|
np.isnan(neighbor_discount_factor)
|
numpy.isnan
|
import random
import operator
import time
import numpy as np
import tensorflow as tf
from multiprocessing import Process, Pipe
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
def tf_sum(x, axis=None, keepdims=False):
axis = None if axis is None else [axis]
return tf.reduce_sum(x, axis=axis, keep_dims=keepdims)
class Pd(object):
"""
A particular probability distribution
"""
def flatparam(self):
raise NotImplementedError
def mode(self):
raise NotImplementedError
def neglogp(self, x):
# Usually it's easier to define the negative logprob
raise NotImplementedError
def kl(self, other):
raise NotImplementedError
def entropy(self):
raise NotImplementedError
def sample(self):
raise NotImplementedError
def logp(self, x):
return - self.neglogp(x)
class PdType(object):
"""
Parametrized family of probability distributions
"""
def pdclass(self):
raise NotImplementedError
def pdfromflat(self, flat):
return self.pdclass()(flat)
def param_shape(self):
raise NotImplementedError
def sample_shape(self):
raise NotImplementedError
def sample_dtype(self):
raise NotImplementedError
def param_placeholder(self, prepend_shape, name=None):
return tf.placeholder(dtype=tf.float32, shape=prepend_shape+self.param_shape(), name=name)
def sample_placeholder(self, prepend_shape, name=None):
return tf.placeholder(dtype=self.sample_dtype(), shape=prepend_shape+self.sample_shape(), name=name)
class DiagGaussianPdType(PdType):
def __init__(self, size):
self.size = size
def pdclass(self):
return DiagGaussianPd
def param_shape(self):
return [2*self.size]
def sample_shape(self):
return [self.size]
def sample_dtype(self):
return tf.float32
class DiagGaussianPd(Pd):
def __init__(self, flat):
self.flat = flat
mean, logstd = tf.split(axis=len(flat.shape)-1, num_or_size_splits=2, value=flat)
self.mean = mean
self.logstd = logstd
self.std = tf.exp(logstd)
def flatparam(self):
return self.flat
def mode(self):
return self.mean
def neglogp(self, x):
return 0.5 * tf_sum(tf.square((x - self.mean) / self.std), axis=-1) \
+ 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
+ tf_sum(self.logstd, axis=-1)
def kl(self, other):
assert isinstance(other, DiagGaussianPd)
return tf_sum(other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) / (2.0 * tf.square(other.std)) - 0.5, axis=-1)
def entropy(self):
return tf_sum(self.logstd + .5 *
|
np.log(2.0 * np.pi * np.e)
|
numpy.log
|
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA2D
#
# https://github.com/CNES/Pandora2D
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions associated to raster images.
"""
import xarray as xr
import numpy as np
from scipy.ndimage.interpolation import shift
def shift_img_pandora2d(img_right: xr.Dataset, dec_row: int) -> xr.Dataset:
"""
Return a Dataset that contains the shifted right images
:param img_right: right Dataset image containing :
        - im : 2D (row, col) xarray.DataArray
    :type img_right: xr.Dataset
    :param dec_row: the row shift (disparity along rows) to apply
:type dec_row: int
:return: img_right_shift: Dataset containing the shifted image
:rtype: xr.Dataset
"""
# dimensions of images
nrow_, ncol_ = img_right["im"].shape
# shifted image by scipy
data = shift(img_right["im"].data, (-dec_row, 0), cval=img_right.attrs["no_data_img"])
# create shifted image dataset
img_right_shift = xr.Dataset(
{"im": (["row", "col"], data)}, coords={"row":
|
np.arange(nrow_)
|
numpy.arange
|
import cntk as C
import logging
import datetime
import numpy as np
import pandas as pd
# pip install git+https://github.com/pydata/pandas-datareader.git
from pandas_datareader import data
import time
import traceback
logging.basicConfig(level=10, format="%(asctime)s - [%(levelname)8s] - %(name)s - %(message)s")
log = logging.getLogger("next_day_trend")
# fix a random seed for CNTK components
C.cntk_py.set_fixed_random_seed(1)
# default='warn'
pd.options.mode.chained_assignment = None
# Set a random seed
np.random.seed(123)
class NextDayTrend:
def __init__(self, source, contract, start, end, target_date):
self.source = source
self.contract = contract
self.start = start
self.end = end
self.target_date = target_date
self.response = ""
@staticmethod
def _create_model(net_input, num_output_classes, num_hidden_layers, hidden_layers_dim):
h = net_input
with C.layers.default_options(init=C.glorot_uniform()):
for i in range(num_hidden_layers):
h = C.layers.Dense(hidden_layers_dim,
activation=C.relu)(h)
return C.layers.Dense(num_output_classes, activation=None)(h)
@staticmethod
# Defines a utility that prints the training progress
def _print_training_progress(trainer, mb, frequency, verbose=1):
training_loss = "NA"
eval_error = "NA"
if mb % frequency == 0:
training_loss = trainer.previous_minibatch_loss_average
eval_error = trainer.previous_minibatch_evaluation_average
if verbose:
log.info("Minibatch: {0}, Loss: {1:.4f}, Error: {2:.2f}%".format(mb, training_loss, eval_error * 100))
return mb, training_loss, eval_error
def _get_asset_data(self):
retry_cnt, max_num_retry = 0, 3
while retry_cnt < max_num_retry:
try:
end = datetime.datetime.now()
return data.DataReader(self.contract, self.source, "2000-01-01", end.date())
            except Exception:
retry_cnt += 1
time.sleep(np.random.randint(1, 10))
log.error("{} is not reachable".format(self.source))
return []
def asset_trend(self):
try:
asset_data = self._get_asset_data()
# Feature name list
predictor_names = []
if "Close" in asset_data and "Volume" in asset_data:
close_tag = "Close"
volume_tag = "Volume"
elif "close" in asset_data and "volume" in asset_data:
close_tag = "close"
volume_tag = "volume"
else:
return {"error": "Couldn't find Close|Volume data."}
# Compute price difference as a feature
asset_data["diff"] = np.abs(
(asset_data[close_tag] - asset_data[close_tag].shift(1)) / asset_data[close_tag]).fillna(0)
predictor_names.append("diff")
# Compute the volume difference as a feature
asset_data["v_diff"] = np.abs(
(asset_data[volume_tag] - asset_data[volume_tag].shift(1)) / asset_data[volume_tag]).fillna(0)
predictor_names.append("v_diff")
# Compute the asset being up (1) or down (0) over different day offsets compared to current closing price
num_days_back = 8
for i in range(1, num_days_back + 1):
# i: number of look back days
asset_data["p_" + str(i)] = np.where(asset_data[close_tag] > asset_data[close_tag].shift(i), 1, 0)
predictor_names.append("p_" + str(i))
asset_data["next_day"] = np.where(asset_data[close_tag].shift(-1) > asset_data[close_tag], 1, 0)
# The label must be one-hot encoded
asset_data["next_day_opposite"] = np.where(asset_data["next_day"] == 1, 0, 1)
# Establish the start and end date of our training timeseries
training_data = asset_data[self.start:self.end]
training_features = np.asarray(training_data[predictor_names], dtype="float32")
training_labels = np.asarray(training_data[["next_day", "next_day_opposite"]], dtype="float32")
# Lets build the network
input_dim = 2 + num_days_back
# Remember we need to have 2 since we are trying to classify if the market goes up or down 1 hot encoded
num_output_classes = 2
num_hidden_layers = 2
hidden_layers_dim = 2 + num_days_back
input_dynamic_axes = [C.Axis.default_batch_axis()]
net_input = C.input_variable(input_dim, dynamic_axes=input_dynamic_axes)
label = C.input_variable(num_output_classes, dynamic_axes=input_dynamic_axes)
z = self._create_model(net_input, num_output_classes, num_hidden_layers, hidden_layers_dim)
loss = C.cross_entropy_with_softmax(z, label)
label_error = C.classification_error(z, label)
lr_per_minibatch = C.learning_parameter_schedule(0.125)
trainer = C.Trainer(z, (loss, label_error), [C.sgd(z.parameters, lr=lr_per_minibatch)])
# Initialize the parameters for the trainer, we will train in large minibatches in sequential order
minibatch_size = 100
num_minibatches = len(training_data.index) // minibatch_size
# Run the trainer on and perform model training
training_progress_output_freq = 1
# Visualize the loss over minibatch
plotdata = {"batchsize": [], "loss": [], "error": []}
# It is key that we make only one pass through the data linearly in time
num_passes = 1
l_training_features = len(training_features)
training_features = training_features[:l_training_features - (l_training_features % num_minibatches)]
l_training_labels = len(training_labels)
training_labels = training_labels[:l_training_labels - (l_training_labels % num_minibatches)]
# Train our neural network
tf = np.split(training_features, num_minibatches)
tl = np.split(training_labels, num_minibatches)
            for i in range(num_minibatches * num_passes):  # multiply by the number of passes over the data
features = np.ascontiguousarray(tf[i % num_minibatches])
labels =
|
np.ascontiguousarray(tl[i % num_minibatches])
|
numpy.ascontiguousarray
|
from builtins import object
import numpy as np
from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.layer_utils import *
class ThreeLayerConvNet(object):
"""
A three-layer convolutional network with the following architecture:
conv - relu - 2x2 max pool - affine - relu - affine - softmax
The network operates on minibatches of data that have shape (N, C, H, W)
consisting of N images, each with height H and width W and with C input
channels.
"""
def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,
hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,
dtype=np.float32):
"""
Initialize a new network.
Inputs:
- input_dim: Tuple (C, H, W) giving size of input data
- num_filters: Number of filters to use in the convolutional layer
- filter_size: Size of filters to use in the convolutional layer
- hidden_dim: Number of units to use in the fully-connected hidden layer
- num_classes: Number of scores to produce from the final affine layer.
- weight_scale: Scalar giving standard deviation for random initialization
of weights.
- reg: Scalar giving L2 regularization strength
- dtype: numpy datatype to use for computation.
"""
self.params = {}
self.reg = reg
self.dtype = dtype
############################################################################
# TODO: Initialize weights and biases for the three-layer convolutional #
# network. Weights should be initialized from a Gaussian with standard #
# deviation equal to weight_scale; biases should be initialized to zero. #
# All weights and biases should be stored in the dictionary self.params. #
# Store weights and biases for the convolutional layer using the keys 'W1' #
# and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #
# hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #
# of the output affine layer. #
############################################################################
C, H, W = input_dim
self.params['W1'] = np.random.normal(loc=0, scale=weight_scale, size=(num_filters, C, filter_size, filter_size))
self.params['b1'] = np.zeros((num_filters,))
shape_after_pool = (H - 2) / 2 + 1, (W - 2) / 2 + 1
assert shape_after_pool[0] == int(shape_after_pool[0])
assert shape_after_pool[1] == int(shape_after_pool[1])
shape_after_pool = int(shape_after_pool[0]), int(shape_after_pool[1])
self.params['W2'] = np.random.normal(loc=0, scale=weight_scale, size=(
num_filters * shape_after_pool[0] * shape_after_pool[1], hidden_dim))
self.params['b2'] = np.zeros((hidden_dim,))
self.params['W3'] = np.random.normal(loc=0, scale=weight_scale, size=(hidden_dim, num_classes))
self.params['b3'] = np.zeros((num_classes,))
############################################################################
# END OF YOUR CODE #
############################################################################
for k, v in self.params.items():
self.params[k] = v.astype(dtype)
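    # Shape sketch for the defaults (illustrative): input (N, 3, 32, 32) -> conv (32 filters,
    # 7x7, pad 3, stride 1) + relu -> (N, 32, 32, 32) -> 2x2 max pool -> (N, 32, 16, 16)
    # -> flatten (N, 8192) -> hidden affine + relu (N, 100) -> output affine (N, 10).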
def loss(self, X, y=None):
"""
Evaluate loss and gradient for the three-layer convolutional network.
Input / output: Same API as TwoLayerNet in fc_net.py.
"""
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
W3, b3 = self.params['W3'], self.params['b3']
# pass conv_param to the forward pass for the convolutional layer
filter_size = W1.shape[2]
conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}
# pass pool_param to the forward pass for the max-pooling layer
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
scores = None
############################################################################
# TODO: Implement the forward pass for the three-layer convolutional net, #
# computing the class scores for X and storing them in the scores #
# variable. #
############################################################################
layer1, l1_cache = conv_relu_pool_forward(X, self.params['W1'], self.params['b1'], conv_param, pool_param)
layer1_flatten =
|
np.reshape(layer1, (-1, self.params['W2'].shape[0]))
|
numpy.reshape
|
"""Tests for a policy returning random unit vectors."""
import pathlib
import numpy as np
import pytest
from pytest_cases import parametrize, parametrize_with_cases
from probnum.linalg.solvers import LinearSolverState, policies
case_modules = (pathlib.Path(__file__).parent / "cases").stem
cases_policies = case_modules + ".policies"
cases_states = case_modules + ".states"
@parametrize_with_cases("policy", cases=cases_policies, glob="*unit_vector*")
@parametrize_with_cases("state", cases=cases_states)
@parametrize("seed", [1, 3, 42])
def test_returns_unit_vector(
policy: policies.LinearSolverPolicy, state: LinearSolverState, seed: int
):
rng =
|
np.random.default_rng(seed)
|
numpy.random.default_rng
|
import pytest
from recOrder.io.config_reader import ConfigReader
from recOrder.pipelines.pipeline_manager import PipelineManager
from recOrder.pipelines.phase_from_bf_pipeline import PhaseFromBF
from waveorder.io.writer import WaveorderWriter
from recOrder.compute.qlipp_compute import reconstruct_qlipp_stokes, reconstruct_qlipp_birefringence, \
reconstruct_phase3D, reconstruct_phase2D
from os.path import dirname, abspath
import numpy as np
import os
import zarr
def test_pipeline_manager_initiate(setup_BF_test_data_zarr, setup_data_save_folder):
folder, data = setup_BF_test_data_zarr
save_folder = setup_data_save_folder
path_to_config = os.path.join(dirname(dirname(abspath(__file__))), 'test_configs/phase/config_phase_full_pytest.yml')
config = ConfigReader(path_to_config, data_dir=data, save_dir=save_folder)
manager = PipelineManager(config)
assert(manager.config is not None)
assert(manager.data is not None)
assert(manager.data.get_num_positions()*manager.data.frames == len(manager.pt_set))
assert(manager.pipeline is not None)
assert(isinstance(manager.pipeline, PhaseFromBF))
def test_qlipp_pipeline_initiate(setup_BF_test_data_zarr, setup_data_save_folder):
folder, data = setup_BF_test_data_zarr
save_folder = setup_data_save_folder
path_to_config = os.path.join(dirname(dirname(abspath(__file__))), 'test_configs/phase/config_phase_full_pytest.yml')
config = ConfigReader(path_to_config, data_dir=data, save_dir=save_folder)
manager = PipelineManager(config)
pipeline = manager.pipeline
assert(pipeline.config == manager.config)
assert(pipeline.data == manager.data)
assert(pipeline.t == manager.num_t)
assert(pipeline.mode == '3D')
assert(pipeline.slices == manager.data.slices)
assert(pipeline.img_dim == (manager.data.height, manager.data.width, manager.data.slices))
assert(pipeline.bf_chan_idx == 0)
assert(pipeline.fluor_idxs == [])
assert(pipeline.data_shape == (manager.data.frames, manager.data.channels,
manager.data.slices, manager.data.height, manager.data.width))
assert(pipeline.chunk_size == (1, 1, 1, manager.data.height, manager.data.width))
assert(isinstance(pipeline.writer, WaveorderWriter))
assert(pipeline.reconstructor is not None)
def test_pipeline_manager_run(setup_BF_test_data_zarr, setup_data_save_folder):
folder, data = setup_BF_test_data_zarr
save_folder = setup_data_save_folder
path_to_config = os.path.join(dirname(dirname(abspath(__file__))), 'test_configs/phase/config_phase_full_pytest.yml')
config = ConfigReader(path_to_config, data_dir=data, save_dir=save_folder)
manager = PipelineManager(config)
manager.run()
store = zarr.open(os.path.join(save_folder, '2T_3P_81Z_231Y_498X_Kazansky.zarr'))
array = store['Row_0']['Col_0']['Pos_000']['array']
assert (store.attrs.asdict()['Config'] == config.yaml_dict)
assert (store['Row_0']['Col_0']['Pos_000'])
assert (store['Row_0']['Col_1']['Pos_001'])
assert (store['Row_0']['Col_2']['Pos_002'])
assert (array.shape == (2, 1, 81, manager.data.height, manager.data.width))
def test_3D_reconstruction(setup_BF_test_data_zarr, setup_data_save_folder):
folder, data = setup_BF_test_data_zarr
save_folder = setup_data_save_folder
path_to_config = os.path.join(dirname(dirname(abspath(__file__))), 'test_configs/phase/config_phase_3D_pytest.yml')
config = ConfigReader(path_to_config, data_dir=data, save_dir=save_folder)
manager = PipelineManager(config)
assert(manager.pipeline.mode == '3D')
manager.run()
pos, t, z = 1, 0, 40
data = manager.data.get_array(pos)
recon = manager.pipeline.reconstructor
phase3D = reconstruct_phase3D(np.transpose(data[t, 0], (1, 2, 0)), recon, method=config.phase_denoiser_3D,
reg_re=config.Tik_reg_ph_3D, rho=config.rho_3D, lambda_re=config.TV_reg_ph_3D,
itr=config.itr_3D)
store = zarr.open(os.path.join(save_folder, '2T_3P_81Z_231Y_498X_Kazansky.zarr'), 'r')
array = store['Row_0']['Col_1']['Pos_001']['array']
# Check Shape
assert(array.shape == (1, len(config.output_channels), 81, 231, 498))
# Check Phase
assert (np.sum(np.abs(phase3D[z] - array[0, 0, z]) ** 2) / np.sum(np.abs(phase3D[z])**2) < 0.1)
def test_2D_reconstruction(setup_BF_test_data_zarr, setup_data_save_folder):
folder, data = setup_BF_test_data_zarr
save_folder = setup_data_save_folder
path_to_config = os.path.join(dirname(dirname(abspath(__file__))), 'test_configs/phase/config_phase_2D_pytest.yml')
config = ConfigReader(path_to_config, data_dir=data, save_dir=save_folder)
manager = PipelineManager(config)
assert(manager.pipeline.mode == '2D')
manager.run()
pos, t, z = 1, 0, manager.pipeline.focus_slice
data = manager.data.get_array(pos)
recon = manager.pipeline.reconstructor
phase2D = reconstruct_phase2D(np.transpose(data[t, 0], (1, 2, 0)), recon, method=config.phase_denoiser_2D,
reg_p=config.Tik_reg_ph_2D, rho=config.rho_2D, lambda_p=config.TV_reg_ph_2D,
itr=config.itr_2D)
store = zarr.open(os.path.join(save_folder, '2T_3P_81Z_231Y_498X_Kazansky.zarr'), 'r')
array = store['Row_0']['Col_1']['Pos_001']['array']
# Check Shapes
assert(array.shape == (1, len(config.output_channels), 1, 231, 498))
# Check Phase
assert (np.sum(
|
np.abs(phase2D - array[0, 0, 0])
|
numpy.abs
|
import numpy
from scipy.integrate import quad
from scipy.special import erf, jv
from DarkMatterUtilities.Constants import *
## -- Class Definitions -- ##
# Defines a detector made of a single material.
# Default is a 100 kg xenon target
class Target:
Name = "Xenon" # name of target
A = 1.0 # amu "dimensionless"
Z = 1.0 # amu "dimensionless"
TotalMass = 100.0 # kg of detector
ExposureTime = 1.0 # years of operation
NuclearMass_GeV = A * amu_to_GeV # nuclear mass in GeV
NuclearMass_kg = A * amu_to_kg # nuclear mass in kg
FF_type = 4 # Which form factor to use
FF_Rn = 1.0 # nuclear form factor radius [fm]
FF_alpha = 1./3. # nuclear form factor parameterization [dimensionless] only impacts FF type 2
def __init__(self, _A, _Z, _TotalMass, _ExposureTime, _Name, _FF_Rn):
self.A = _A
self.Z = _Z
self.TotalMass = _TotalMass
self.ExposureTime = _ExposureTime
self.NuclearMass_GeV = _A * amu_to_GeV
self.NuclearMass_kg = _A * amu_to_kg
self.Name = _Name
self.FF_Rn = _FF_Rn
def ReducedMass_Nucleus_amu(self, _mass_amu):
# Returns the reduced mass of the system consisting of target nucleus and a second mass specified in amu
_numerator = self.A * _mass_amu
_denominator = self.A + _mass_amu
return (_numerator / _denominator)
def ReducedMass_Nucleus_GeV(self, _mass_GeV):
# Returns the reduced mass of the system consisting of target nucleus and a second mass specified in GeV
_numerator = self.NuclearMass_GeV * _mass_GeV
_denominator = self.NuclearMass_GeV + _mass_GeV
return (_numerator / _denominator)
def ReducedMass_Nucleus_kg(self, _mass_kg):
# Returns the reduced mass of the system consisting of target nucleus and a second mass specified in kg
_numerator = self.NuclearMass_kg * _mass_kg
_denominator = self.NuclearMass_kg + _mass_kg
return (_numerator / _denominator)
def FormFactor(self, _Er_keV):
# Evaluates the dimensionless form factor for a given momentum transfer (recoil energy)
# See Lewin and Smith (1996) for description
# Helm form factor:
_alpha = self.FF_alpha # dimensionless
_rn = self.FF_Rn # fm
_s = 1.0 # fm
_q = numpy.sqrt(2.0 * self.NuclearMass_GeV * _Er_keV) # MeV / c
_q_fm = numpy.sqrt(2.0 * self.NuclearMass_GeV * _Er_keV * 1e-6) / 0.197 # fm^-1
_qrn = _q * ( _rn / hbarc_MeV_fm) # dimensionless
_qs = _q * ( _s / hbarc_MeV_fm) # dimensionless
if ( self.FF_type == 0 ):
## Lewin & Smith -- thin shell: exp[-(q r_n)^(2/3) / 3]
return numpy.exp(-1.0 * numpy.power(_qrn,2./3.) / 3.0 )
elif ( self.FF_type == 1 ):
## Lewin & Smith -- thin shell: [ sin(q r_n) / (q r_n) ]^2
## Confirmed to match spectrum from L&S
return numpy.power( numpy.sin(_qrn) / _qrn , 2.)
elif ( self.FF_type == 2 ):
## Lewin & Smith -- solid sphere: exp[-(q r_n)^(2/3) / 5]
return numpy.exp(-1.0 * numpy.power(_qrn,2./3.) / 5.0 )
elif ( self.FF_type == 3 ):
## Lewin & Smith -- solid sphere: { 3 [ sin(q r_n) - q r_n cos(q r_n)] / (q r_n)^3 }^2
## Confirmed to match spectrum from L&S
_arg1 = numpy.sin(_qrn) - (_qrn * numpy.cos(_qrn))
_arg2 = _arg1 / numpy.power(_qrn,3.)
return numpy.power(3.0 * _arg2,2.)
# return 3.0*_arg2 * numpy.exp( - (_qs**2)/2.)
elif (self.FF_type == 4 ):
return self.HelmFormFactor(_Er_keV)
elif (self.FF_type == 5 ):
return self.HelmFormFactor_DW(_Er_keV)
else:
if _Er_keV <= 1e5:
return 1.0
else:
return 0
def HelmFormFactor(self, _Er_keV):
# [arXiv:0608035] Duda et al 2007 (consistent with DMCalc implementation)
_a = 0.52 # fm
_s = 0.9 # fm
_c = (1.23 * numpy.power(self.A,1./3.)) - 0.60 # fm
_R1 = numpy.sqrt( (_c**2)
+ ( (7./3.)*(numpy.pi**2)*(_a**2) )
- ( 5.0*(_s**2) ) ) # fm
_q = numpy.sqrt(2.0 * _Er_keV * self.NuclearMass_GeV) # MeV / c
_qs = (_s / hbarc_MeV_fm) * _q # dimensionless
_qR1 = (_R1 / hbarc_MeV_fm) * _q # dimensionless
_expfac = numpy.exp(-1.0 * _qs**2) # dimensionless
_jterm = ( 3.0 * jv(1, _qR1) / _qR1 )**2 # dimensionless
return _jterm * _expfac
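    # Worked sketch (illustrative): for a xenon-like target with A ~ 131 the parameters above
    # give c ~ 1.23 * 131**(1/3) - 0.60 ~ 5.6 fm, and the factor is evaluated per recoil
    # energy, e.g. F2 = target.HelmFormFactor(10.0) for a 10 keV recoil.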
def HelmFormFactor_DW(self, _Er_keV):
_s = 1.0 # fm
_R = (1.20 * numpy.power(self.A,1./3.)) # fm
        _r = numpy.power((numpy.power(_R, 2) - 5*numpy.power(_s, 2)), 0.5)  # fm
_q = numpy.sqrt(2.0 * _Er_keV * self.NuclearMass_GeV) # MeV / c
_qs = (_s / hbarc_MeV_fm) * _q # dimensionless
_qr = (_r / hbarc_MeV_fm) * _q # dimensionless
_expfac = numpy.exp(-numpy.power(_qs,2)) # dimensionless
_jterm = (numpy.sin(_qr)/(numpy.power(_qr,2)))-(numpy.cos(_qr)/_qr) # dimensionless
return pow(((3*_jterm)/(_qr)),2)*_expfac
def LindhardFactor(self, _Er_keV):
# Determine the lindhard factor for a nuclear recoil of a specified recoil energy in keV
_Z = self.Z
_e = 11.5 * _Er_keV * numpy.power(_Z, -7./3.)
_k = 0.133 * numpy.power(_Z, 2./3.) * numpy.power(self.A, -1./2.)
_g = (3.0 * numpy.power(_e, 0.15)) + (0.7 * numpy.power(_e, 0.6)) + _e
_LF = (_k * _g) / ( 1. + (_k * _g))
return _LF
def RecoilEnergyAngularDist(self, _theta):
# Given a fixed angle for the outgoing particle, what is the
# energy fraction deposited (this is recoil energy)
_m1 = m_neutron_GeV
_m2 = self.NuclearMass_GeV
_Mfrac = _m1 * _m1 /
|
numpy.power(_m1+_m2,2.)
|
numpy.power
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test a full run of the codes from the command line."""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import hendrics as hen
import logging
import os
import glob
import subprocess as sp
import numpy as np
from astropy.tests.helper import catch_warnings
from astropy.io import fits
import pytest
from stingray.lightcurve import Lightcurve
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
HEN_FILE_EXTENSION = hen.io.HEN_FILE_EXTENSION
logging.basicConfig(filename='HEN.log', level=logging.DEBUG, filemode='w')
class TestFullRun(object):
"""Test how command lines work.
Usually considered bad practice, but in this
case I need to test the full run of the codes, and files depend on each
other.
Inspired by http://stackoverflow.com/questions/5387299/python-unittest-testcase-execution-order
When command line is missing, uses some function calls
""" # NOQA
@classmethod
def setup_class(cls):
curdir = os.path.abspath(os.path.dirname(__file__))
cls.datadir = os.path.join(curdir, 'data')
cls.first_event_file = os.path.join(cls.datadir,
'monol_testA_nustar_fpma_ev' +
HEN_FILE_EXTENSION)
def test_scripts_are_installed(self):
"""Test only once that command line scripts are installed correctly."""
fits_file = os.path.join(self.datadir, 'monol_testA.evt')
command = 'HENreadfile {0}'.format(fits_file)
sp.check_call(command.split())
def test_fake_file(self):
"""Test produce a fake event file."""
fits_file = os.path.join(self.datadir, 'monol_test_fake.evt')
hen.fake.main(['-o', fits_file, '--instrument', 'FPMB'])
info = hen.io.print_fits_info(fits_file, hdu=1)
assert info['Instrument'] == 'FPMB'
def test_fake_file_from_input_lc(self):
"""Test produce a fake event file from input light curve."""
lcurve_in = os.path.join(self.datadir, 'lcurveA.fits')
fits_file = os.path.join(self.datadir, 'monol_test_fake_lc.evt')
hen.fake.main(['--lc', lcurve_in, '-o', fits_file])
def test_fake_file_with_deadtime(self):
"""Test produce a fake event file and apply deadtime."""
fits_file = os.path.join(self.datadir, 'monol_test_fake_lc.evt')
hen.fake.main(['--deadtime', '2.5e-3',
'--ctrate', '2000',
'-o', fits_file])
def test_fake_file_xmm(self):
"""Test produce a fake event file and apply deadtime."""
fits_file = os.path.join(self.datadir, 'monol_test_fake_lc_xmm.evt')
hen.fake.main(['--deadtime', '1e-4', '-m', 'XMM', '-i', 'epn',
'--ctrate', '2000',
'-o', fits_file])
hdu_list = fits.open(fits_file)
hdunames = [hdu.name for hdu in hdu_list]
assert 'STDGTI01' in hdunames
assert 'STDGTI02' in hdunames
assert 'STDGTI07' in hdunames
def test_load_events(self):
"""Test event file reading."""
command = '{0}'.format(
os.path.join(self.datadir, 'monol_testA.evt'))
hen.read_events.main(command.split())
new_filename = self.first_event_file
ev = hen.io.load_events(new_filename)
assert hasattr(ev, 'header')
assert hasattr(ev, 'gti')
def test_load_events_with_2_cpus(self):
"""Test event file reading."""
command = '{0} {1} --nproc 2'.format(
os.path.join(self.datadir, 'monol_testB.evt'),
os.path.join(self.datadir, 'monol_testA_timezero.evt'),
os.path.join(self.datadir, 'monol_test_fake.evt'))
hen.read_events.main(command.split())
def test_load_events_split(self):
"""Test event file splitting."""
command = \
'{0} -g --min-length 0'.format(
os.path.join(self.datadir, 'monol_testB.evt'))
hen.read_events.main(command.split())
new_filename = os.path.join(self.datadir,
'monol_testB_nustar_fpmb_gti000_ev' +
HEN_FILE_EXTENSION)
assert os.path.exists(new_filename)
def test_save_binary_events(self):
f = self.first_event_file
with pytest.raises(ValueError) as excinfo:
hen.binary.main_presto("{} -b 0.1 -e 3 59".format(f).split())
assert 'Energy filtering requested' in str(excinfo)
def test_load_gtis(self):
"""Test loading of GTIs from FITS files."""
fits_file = os.path.join(self.datadir, 'monol_testA.evt')
hen.io.load_gtis(fits_file)
def test_load_events_noclobber(self):
"""Test event file reading w. noclobber option."""
with catch_warnings() as w:
command = \
'{0} --noclobber'.format(
os.path.join(self.datadir, 'monol_testB.evt'))
hen.read_events.main(command.split())
assert str(w[0].message).strip().endswith(
"exists and using noclobber. Skipping"), \
"Unexpected warning output"
def test_load_events_xmm(self):
"""Test event file reading."""
command = '{0}'.format(
os.path.join(self.datadir, 'monol_test_fake_lc_xmm.evt'))
hen.read_events.main(command.split())
def test_calibrate(self):
"""Test event file calibration."""
from astropy.io.fits import Header
command = '{0} -r {1}'.format(
os.path.join(self.datadir,
'monol_testA_nustar_fpma_ev' + HEN_FILE_EXTENSION),
os.path.join(self.datadir, 'test.rmf'))
hen.calibrate.main(command.split())
new_filename = os.path.join(self.datadir,
'monol_testA_nustar_fpma_ev_calib' +
HEN_FILE_EXTENSION)
assert os.path.exists(new_filename)
ev = hen.io.load_events(new_filename)
assert hasattr(ev, 'header')
Header.fromstring(ev.header)
assert hasattr(ev, 'gti')
gti_to_test = hen.io.load_events(self.first_event_file).gti
assert np.allclose(gti_to_test, ev.gti)
def test_save_binary_calibrated_events(self):
f = os.path.join(self.datadir,
'monol_testA_nustar_fpma_ev_calib' +
HEN_FILE_EXTENSION)
hen.binary.main_presto("{} -b 0.1 -e 3 59".format(f).split())
assert os.path.exists(f.replace(HEN_FILE_EXTENSION, '.dat'))
assert os.path.exists(f.replace(HEN_FILE_EXTENSION, '.inf'))
def test_calibrate_2_cpus(self):
"""Test event file calibration."""
command = '{0} -r {1} --nproc 2'.format(
os.path.join(self.datadir,
'monol_testB_nustar_fpmb_ev' + HEN_FILE_EXTENSION),
os.path.join(self.datadir, 'test.rmf'))
hen.calibrate.main(command.split())
assert os.path.exists(os.path.join(self.datadir,
'monol_testB_nustar_fpmb_ev_calib' +
HEN_FILE_EXTENSION))
def test_save_varen_rms(self):
fname = os.path.join(self.datadir,
'monol_testA_nustar_fpma_ev_calib' +
HEN_FILE_EXTENSION)
hen.varenergy.main([fname, "-f", "0", "100", "--energy-values",
"0.3", "12", "5", "lin", "--rms", "-b", "0.5",
"--segment-size", "128"])
out = hen.base.hen_root(fname) + "_rms" + '.qdp'
        assert os.path.exists(out)
def test_save_varen_lag(self):
fname = os.path.join(self.datadir,
'monol_testA_nustar_fpma_ev_calib' +
HEN_FILE_EXTENSION)
hen.varenergy.main([fname, "-f", "0", "100", "--energy-values",
"0.3", "12", "5", "lin", "--lag", "-b", "0.5",
"--segment-size", "128"])
out = hen.base.hen_root(fname) + "_lag" + '.qdp'
        assert os.path.exists(out)
def test_lcurve(self):
"""Test light curve production."""
from astropy.io.fits import Header
command = ('{0} -e {1} {2} --safe-interval '
'{3} {4} --nproc 2 -b 0.5 -o {5}').format(
os.path.join(self.datadir, 'monol_testA_nustar_fpma_ev_calib' +
HEN_FILE_EXTENSION),
3, 50, 100, 300,
os.path.join(self.datadir, 'monol_testA_E3-50_lc' +
HEN_FILE_EXTENSION)
)
hen.lcurve.main(command.split())
new_filename = \
os.path.join(os.path.join(self.datadir,
'monol_testA_E3-50_lc' +
HEN_FILE_EXTENSION))
assert os.path.exists(new_filename)
lc = hen.io.load_lcurve(new_filename)
assert hasattr(lc, 'header')
# Test that the header is correctly conserved
Header.fromstring(lc.header)
assert hasattr(lc, 'gti')
gti_to_test = hen.io.load_events(self.first_event_file).gti
assert np.allclose(gti_to_test, lc.gti)
def test_save_binary_lc(self):
f = \
os.path.join(os.path.join(self.datadir,
'monol_testA_E3-50_lc' +
HEN_FILE_EXTENSION))
hen.binary.main_presto("{}".format(f).split())
assert os.path.exists(f.replace(HEN_FILE_EXTENSION, '.dat'))
assert os.path.exists(f.replace(HEN_FILE_EXTENSION, '.inf'))
def test_lcurve_B(self):
command = ('{0} -e {1} {2} --safe-interval '
'{3} {4} -b 0.5 -o {5}').format(
os.path.join(self.datadir, 'monol_testB_nustar_fpmb_ev_calib' +
HEN_FILE_EXTENSION),
3, 50, 100, 300,
os.path.join(self.datadir, 'monol_testB_E3-50_lc' +
HEN_FILE_EXTENSION))
hen.lcurve.main(command.split())
assert os.path.exists(os.path.join(self.datadir,
'monol_testB_E3-50_lc' +
HEN_FILE_EXTENSION))
def test_lcurve_from_split_event(self):
"""Test lc reading of split event file."""
command = '{0}'.format(
os.path.join(self.datadir, 'monol_testB_nustar_fpmb_gti000_ev' +
HEN_FILE_EXTENSION))
hen.lcurve.main(command.split())
new_filename = os.path.join(self.datadir,
'monol_testB_nustar_fpmb_gti000_lc' +
HEN_FILE_EXTENSION)
assert os.path.exists(new_filename)
lc = hen.io.load_lcurve(new_filename)
gti_to_test = hen.io.load_events(self.first_event_file).gti[0]
assert np.allclose(gti_to_test, lc.gti)
def test_lcurve_split(self):
"""Test lc with gti-split option."""
command = '{0} {1} -g'.format(
os.path.join(self.datadir, 'monol_testA_nustar_fpma_ev' +
HEN_FILE_EXTENSION),
os.path.join(self.datadir, 'monol_testB_nustar_fpmb_ev' +
HEN_FILE_EXTENSION))
hen.lcurve.main(command.split())
new_filename = os.path.join(self.datadir,
'monol_testA_nustar_fpma_gti000_lc' +
HEN_FILE_EXTENSION)
assert os.path.exists(new_filename)
lc = hen.io.load_lcurve(new_filename)
gti_to_test = hen.io.load_events(self.first_event_file).gti[0]
assert np.allclose(gti_to_test, lc.gti)
def test_fits_lcurve0(self):
"""Test light curves from FITS."""
lcurve_ftools_orig = os.path.join(self.datadir, 'lcurveA.fits')
lcurve_ftools = os.path.join(self.datadir,
'lcurve_ftools_lc' +
HEN_FILE_EXTENSION)
command = "{0} --outfile {1}".format(
os.path.join(self.datadir,
'monol_testA_nustar_fpma_ev') + HEN_FILE_EXTENSION,
os.path.join(self.datadir,
'lcurve_lc'))
hen.lcurve.main(command.split())
assert os.path.exists(os.path.join(self.datadir,
'lcurve_lc') + HEN_FILE_EXTENSION)
command = "--fits-input {0} --outfile {1}".format(
lcurve_ftools_orig,
lcurve_ftools)
hen.lcurve.main(command.split())
def test_fits_lcurve1(self):
"""Test light curves from FITS."""
lcurve_ftools = os.path.join(self.datadir,
'lcurve_ftools_lc' +
HEN_FILE_EXTENSION)
lcurve_mp = os.path.join(self.datadir,
'lcurve_lc' +
HEN_FILE_EXTENSION)
lcdata_mp = hen.io.load_data(lcurve_mp)
lcdata_ftools = hen.io.load_data(lcurve_ftools)
lc_mp = lcdata_mp['counts']
lenmp = len(lc_mp)
lc_ftools = lcdata_ftools['counts']
lenftools = len(lc_ftools)
goodlen = min([lenftools, lenmp])
diff = lc_mp[:goodlen] - lc_ftools[:goodlen]
assert np.all(np.abs(diff) <= 1e-3), \
'Light curve data do not coincide between FITS and HEN'
def test_txt_lcurve(self):
"""Test light curves from txt."""
lcurve_mp = os.path.join(self.datadir,
'lcurve_lc' +
HEN_FILE_EXTENSION)
lcdata_mp = hen.io.load_data(lcurve_mp)
lc_mp = lcdata_mp['counts']
time_mp = lcdata_mp['time']
lcurve_txt_orig = os.path.join(self.datadir,
'lcurve_txt_lc.txt')
hen.io.save_as_ascii([time_mp, lc_mp], lcurve_txt_orig)
lcurve_txt = os.path.join(self.datadir,
'lcurve_txt_lc' +
HEN_FILE_EXTENSION)
hen.lcurve.main(['--txt-input', lcurve_txt_orig,
'--outfile', lcurve_txt])
lcdata_txt = hen.io.load_data(lcurve_txt)
lc_txt = lcdata_txt['counts']
assert np.all(np.abs(lc_mp - lc_txt) <= 1e-3), \
'Light curve data do not coincide between txt and HEN'
def test_joinlcs(self):
"""Test produce joined light curves."""
new_filename = os.path.join(
self.datadir, 'monol_test_joinlc' + HEN_FILE_EXTENSION)
# because join_lightcurves separates by instrument
new_actual_filename = os.path.join(
self.datadir, 'FPMAmonol_test_joinlc' + HEN_FILE_EXTENSION)
hen.lcurve.join_lightcurves(
glob.glob(os.path.join(self.datadir,
'monol_testA_nustar_fpma_gti[0-9][0-9][0-9]_lc*')) +
glob.glob(os.path.join(self.datadir,
'monol_testB_nustar_fpmb_gti[0-9][0-9][0-9]_lc*')),
new_filename)
lc = hen.io.load_lcurve(new_actual_filename)
assert hasattr(lc, 'gti')
gti_to_test = hen.io.load_events(self.first_event_file).gti
assert np.allclose(gti_to_test, lc.gti)
def test_scrunchlcs(self):
"""Test produce scrunched light curves."""
a_in = os.path.join(self.datadir,
'monol_testA_E3-50_lc' + HEN_FILE_EXTENSION)
b_in = os.path.join(self.datadir,
'monol_testB_E3-50_lc' + HEN_FILE_EXTENSION)
out = os.path.join(self.datadir,
'monol_test_scrunchlc' + HEN_FILE_EXTENSION)
command = '{0} {1} -o {2}'.format(a_in, b_in, out)
hen.lcurve.scrunch_main(command.split())
a_lc = hen.io.load_lcurve(a_in)
b_lc = hen.io.load_lcurve(b_in)
out_lc = hen.io.load_lcurve(out)
assert np.all(out_lc.counts == a_lc.counts + b_lc.counts)
gti_to_test = hen.io.load_events(self.first_event_file).gti
assert np.allclose(gti_to_test, out_lc.gti)
def testbaselinelc(self):
"""Test produce scrunched light curves."""
a_in = os.path.join(self.datadir,
'monol_testA_E3-50_lc' + HEN_FILE_EXTENSION)
out = os.path.join(self.datadir, 'monol_test_baselc')
command = '{0} -o {1} -p 0.001 --lam 1e5'.format(a_in, out)
hen.lcurve.baseline_main(command.split())
out_lc = hen.io.load_lcurve(out + '_0' + HEN_FILE_EXTENSION)
assert hasattr(out_lc, 'base')
gti_to_test = hen.io.load_events(self.first_event_file).gti
assert np.allclose(gti_to_test, out_lc.gti)
def testbaselinelc_nooutroot(self):
"""Test produce scrunched light curves."""
a_in = os.path.join(self.datadir,
'monol_testA_E3-50_lc' + HEN_FILE_EXTENSION)
command = '{0} -p 0.001 --lam 1e5'.format(a_in)
hen.lcurve.baseline_main(command.split())
out_lc = hen.io.load_lcurve(
hen.base.hen_root(a_in) + '_lc_baseline' + HEN_FILE_EXTENSION)
assert hasattr(out_lc, 'base')
gti_to_test = hen.io.load_events(self.first_event_file).gti
assert np.allclose(gti_to_test, out_lc.gti)
def test_lcurve_error_uncalibrated(self):
"""Test light curve error from uncalibrated file."""
command = ('{0} -e {1} {2}').format(
os.path.join(self.datadir,
'monol_testA_nustar_fpma_ev' + HEN_FILE_EXTENSION),
3, 50)
with pytest.raises(ValueError) as excinfo:
hen.lcurve.main(command.split())
message = str(excinfo.value)
assert str(message).strip().endswith("Did you run HENcalibrate?")
def test_lcurve_pi_filtering(self):
"""Test light curve using PI filtering."""
command = ('{0} --pi-interval {1} {2}').format(
os.path.join(self.datadir,
'monol_testA_nustar_fpma_ev' + HEN_FILE_EXTENSION),
10, 300)
hen.lcurve.main(command.split())
def test_colors_fail_uncalibrated(self):
"""Test light curve using PI filtering."""
command = ('{0} -b 100 -e {1} {2} {2} {3}').format(
os.path.join(self.datadir,
'monol_testA_nustar_fpma_ev' + HEN_FILE_EXTENSION),
3, 5, 10)
with pytest.raises(ValueError) as excinfo:
hen.colors.main(command.split())
assert "No energy information is present " in str(excinfo.value)
def test_colors(self):
"""Test light curve using PI filtering."""
# calculate colors
command = ('{0} -b 100 -e {1} {2} {2} {3}').format(
os.path.join(self.datadir,
'monol_testA_nustar_fpma_ev_calib' +
HEN_FILE_EXTENSION),
3, 5, 10)
hen.colors.main(command.split())
new_filename = \
os.path.join(self.datadir,
'monol_testA_nustar_fpma_E_10-5_over_5-3' +
HEN_FILE_EXTENSION)
assert os.path.exists(new_filename)
out_lc = hen.io.load_lcurve(new_filename)
gti_to_test = hen.io.load_events(self.first_event_file).gti
assert np.allclose(gti_to_test, out_lc.gti)
def test_pds_leahy(self):
"""Test PDS production."""
lc = os.path.join(self.datadir,
'monol_testA_E3-50_lc') + HEN_FILE_EXTENSION
hen.io.main([lc])
command = \
'{0} -f 128 -k PDS --norm leahy'.format(lc)
hen.fspec.main(command.split())
assert os.path.exists(
os.path.join(self.datadir,
'monol_testA_E3-50_pds' + HEN_FILE_EXTENSION))
def test_pds(self):
"""Test PDS production."""
command = \
'{0} {1} -f 128 --save-dyn -k PDS --norm frac --nproc 2 '.format(
os.path.join(self.datadir,
'monol_testA_E3-50_lc') + HEN_FILE_EXTENSION,
os.path.join(self.datadir,
'monol_testB_E3-50_lc') + HEN_FILE_EXTENSION)
hen.fspec.main(command.split())
assert os.path.exists(
os.path.join(self.datadir,
'monol_testB_E3-50_pds' + HEN_FILE_EXTENSION))
assert os.path.exists(
os.path.join(self.datadir,
'monol_testA_E3-50_pds') + HEN_FILE_EXTENSION)
def test_pds_fits(self):
"""Test PDS production with light curves obtained from FITS files."""
lcurve_ftools = os.path.join(self.datadir,
'lcurve_ftools_lc' +
HEN_FILE_EXTENSION)
command = '{0} -f 128'.format(lcurve_ftools)
hen.fspec.main(command.split())
def test_pds_txt(self):
"""Test PDS production with light curves obtained from txt files."""
lcurve_txt = os.path.join(self.datadir,
'lcurve_txt_lc' +
HEN_FILE_EXTENSION)
command = '{0} -f 128'.format(lcurve_txt)
hen.fspec.main(command.split())
def test_cpds_rms_norm(self):
"""Test CPDS production."""
command = \
'{0} {1} -f 128 --save-dyn -k CPDS --norm rms -o {2}'.format(
os.path.join(self.datadir, 'monol_testA_E3-50_lc') +
HEN_FILE_EXTENSION,
os.path.join(self.datadir, 'monol_testB_E3-50_lc') +
HEN_FILE_EXTENSION,
os.path.join(self.datadir, 'monol_test_E3-50'))
hen.fspec.main(command.split())
def test_cpds_wrong_norm(self):
"""Test CPDS production."""
command = \
'{0} {1} -f 128 --save-dyn -k CPDS --norm blablabla -o {2}'.format(
os.path.join(self.datadir, 'monol_testA_E3-50_lc') +
HEN_FILE_EXTENSION,
os.path.join(self.datadir, 'monol_testB_E3-50_lc') +
HEN_FILE_EXTENSION,
os.path.join(self.datadir, 'monol_test_E3-50'))
with pytest.warns(UserWarning) as record:
hen.fspec.main(command.split())
assert np.any(["Beware! Unknown normalization" in r.message.args[0]
for r in record])
def test_cpds(self):
"""Test CPDS production."""
command = \
'{0} {1} -f 128 --save-dyn -k CPDS --norm frac -o {2}'.format(
os.path.join(self.datadir, 'monol_testA_E3-50_lc') +
HEN_FILE_EXTENSION,
os.path.join(self.datadir, 'monol_testB_E3-50_lc') +
HEN_FILE_EXTENSION,
os.path.join(self.datadir, 'monol_test_E3-50'))
hen.fspec.main(command.split())
def test_cpds_2cpus(self):
"""Test CPDS production."""
command = \
('{0} {1} -f 128 --save-dyn -k '
'CPDS --norm frac -o {2} --nproc 2').format(
os.path.join(self.datadir, 'monol_testA_E3-50_lc') +
HEN_FILE_EXTENSION,
os.path.join(self.datadir, 'monol_testB_E3-50_lc') +
HEN_FILE_EXTENSION,
os.path.join(self.datadir, 'monol_test_E3-50'))
hen.fspec.main(command.split())
# def test_dumpdynpds(self):
# """Test dump dynamical PDSs."""
# command = '--noplot ' + \
# os.path.join(self.datadir,
# 'monol_testA_E3-50_pds') + \
# HEN_FILE_EXTENSION
# hen.fspec.dumpdyn_main(command.split())
def test_sumpds(self):
"""Test the sum of pdss."""
hen.sum_fspec.main([
os.path.join(self.datadir,
'monol_testA_E3-50_pds') + HEN_FILE_EXTENSION,
os.path.join(self.datadir,
'monol_testB_E3-50_pds') + HEN_FILE_EXTENSION,
'-o', os.path.join(self.datadir,
'monol_test_sum' + HEN_FILE_EXTENSION)])
# def test_dumpdyncpds(self):
# """Test dumping CPDS file."""
# command = '--noplot ' + \
# os.path.join(self.datadir,
# 'monol_test_E3-50_cpds') + \
# HEN_FILE_EXTENSION
# hen.fspec.dumpdyn_main(command.split())
def test_rebinlc(self):
"""Test LC rebinning."""
command = '{0} -r 4'.format(
os.path.join(self.datadir, 'monol_testA_E3-50_lc') +
HEN_FILE_EXTENSION)
hen.rebin.main(command.split())
def test_rebinpds(self):
"""Test PDS rebinning 1."""
command = '{0} -r 2'.format(
os.path.join(self.datadir, 'monol_testA_E3-50_pds') +
HEN_FILE_EXTENSION)
hen.rebin.main(command.split())
        assert os.path.exists(os.path.join(self.datadir,
                                           'monol_testA_E3-50_pds_rebin2' +
                                           HEN_FILE_EXTENSION))
def test_rebinpds_geom(self):
"""Test geometrical PDS rebinning."""
command = '{0} {1} -r 1.03'.format(
os.path.join(self.datadir, 'monol_testA_E3-50_pds') +
HEN_FILE_EXTENSION,
os.path.join(self.datadir, 'monol_testB_E3-50_pds') +
HEN_FILE_EXTENSION
)
hen.rebin.main(command.split())
        assert os.path.exists(os.path.join(self.datadir,
                                           'monol_testA_E3-50_pds_rebin1.03' +
                                           HEN_FILE_EXTENSION))
        assert os.path.exists(os.path.join(self.datadir,
                                           'monol_testB_E3-50_pds_rebin1.03' +
                                           HEN_FILE_EXTENSION))
def test_rebincpds(self):
"""Test CPDS rebinning."""
command = '{0} -r 2'.format(
os.path.join(self.datadir, 'monol_test_E3-50_cpds') +
HEN_FILE_EXTENSION)
hen.rebin.main(command.split())
        assert os.path.exists(os.path.join(self.datadir,
                                           'monol_test_E3-50_cpds_rebin2' +
                                           HEN_FILE_EXTENSION))
def test_rebincpds_geom(self):
"""Test CPDS geometrical rebinning."""
command = '{0} -r 1.03'.format(
os.path.join(self.datadir, 'monol_test_E3-50_cpds') +
HEN_FILE_EXTENSION)
hen.rebin.main(command.split())
        assert os.path.exists(os.path.join(self.datadir,
                                           'monol_test_E3-50_cpds_rebin1.03' +
                                           HEN_FILE_EXTENSION))
def test_save_lags(self):
fname = os.path.join(self.datadir,
'monol_test_E3-50_cpds_rebin2' +
HEN_FILE_EXTENSION)
hen.timelags.main([fname])
out = hen.base.hen_root(fname) + '_lags.qdp'
        assert os.path.exists(out)
def test_save_fvar(self):
fname = os.path.join(self.datadir,
'monol_testA_E3-50_lc' + HEN_FILE_EXTENSION)
hen.exvar.main([fname, "-c", "10", "--fraction-step", "0.6",
"--norm", "fvar"])
out = hen.base.hen_root(fname) + "_fvar" + '.qdp'
        assert os.path.exists(out)
def test_save_excvar(self):
fname = os.path.join(self.datadir,
'monol_testA_E3-50_lc' +
HEN_FILE_EXTENSION)
hen.exvar.main([fname])
out = hen.base.hen_root(fname) + "_excvar" + '.qdp'
        assert os.path.exists(out)
def test_save_excvar_norm(self):
fname = os.path.join(self.datadir,
'monol_testA_E3-50_lc' +
HEN_FILE_EXTENSION)
hen.exvar.main([fname, "--norm", "norm_excvar"])
out = hen.base.hen_root(fname) + "_norm_excvar" + '.qdp'
        assert os.path.exists(out)
def test_save_excvar_wrong_norm(self):
fname = os.path.join(self.datadir,
'monol_testA_E3-50_lc' +
HEN_FILE_EXTENSION)
with pytest.raises(ValueError) as excinfo:
hen.exvar.main([fname, '--norm', 'cicciput'])
assert 'Normalization must be fvar, ' in str(excinfo.value)
def test_fit_pds(self):
modelstring = '''
from astropy.modeling import models
model = models.Const1D()
'''
modelfile = 'bubu__model__.py'
with open(modelfile, 'w') as fobj:
print(modelstring, file=fobj)
pdsfile1 = \
os.path.join(self.datadir,
'monol_testA_E3-50_pds' + HEN_FILE_EXTENSION)
pdsfile2 = \
os.path.join(self.datadir,
'monol_testB_E3-50_pds' + HEN_FILE_EXTENSION)
command = '{0} {1} -m {2} --frequency-interval 0 10'.format(
pdsfile1,
pdsfile2,
modelfile)
hen.modeling.main_model(command.split())
out0 = os.path.join(self.datadir,
'monol_testA_E3-50_pds_bestfit.p')
out1 = os.path.join(self.datadir,
'monol_testB_E3-50_pds_bestfit.p')
assert os.path.exists(out0)
assert os.path.exists(out1)
m, k, c = hen.io.load_model(
os.path.join(self.datadir,
'monol_testB_E3-50_pds_bestfit.p'))
assert hasattr(m, 'amplitude')
os.unlink(out0)
os.unlink(out1)
out0 = os.path.join(self.datadir,
'monol_testA_E3-50_pds_fit' + HEN_FILE_EXTENSION)
out1 = os.path.join(self.datadir,
'monol_testB_E3-50_pds_fit' + HEN_FILE_EXTENSION)
assert os.path.exists(out0)
assert os.path.exists(out1)
spec = hen.io.load_pds(out0)
assert hasattr(spec, 'best_fits')
def test_fit_cpds(self):
modelstring = '''
from astropy.modeling import models
model = models.Const1D()
'''
modelfile = 'bubu__model__.py'
with open(modelfile, 'w') as fobj:
print(modelstring, file=fobj)
pdsfile1 = \
os.path.join(self.datadir,
'monol_test_E3-50_cpds' + HEN_FILE_EXTENSION)
command = '{0} -m {1} --frequency-interval 0 10'.format(
pdsfile1,
modelfile)
hen.modeling.main_model(command.split())
out0 = os.path.join(self.datadir,
'monol_test_E3-50_cpds_bestfit.p')
assert os.path.exists(out0)
m, k, c = hen.io.load_model(out0)
assert hasattr(m, 'amplitude')
os.unlink(out0)
out0 = \
os.path.join(self.datadir,
'monol_test_E3-50_cpds_fit' + HEN_FILE_EXTENSION)
assert os.path.exists(out0)
spec = hen.io.load_pds(out0)
assert hasattr(spec, 'best_fits')
def test_fit_pds_f_no_of_intervals_invalid(self):
modelstring = '''
from astropy.modeling import models
model = models.Const1D()
'''
modelfile = 'bubu__model__.py'
with open(modelfile, 'w') as fobj:
print(modelstring, file=fobj)
pdsfile1 = \
os.path.join(self.datadir,
'monol_testA_E3-50_pds' + HEN_FILE_EXTENSION)
pdsfile2 = \
os.path.join(self.datadir,
'monol_testB_E3-50_pds' + HEN_FILE_EXTENSION)
command = '{0} {1} -m {2} --frequency-interval 0 1 9'.format(pdsfile1,
pdsfile2,
modelfile)
with pytest.raises(ValueError) as excinfo:
hen.modeling.main_model(command.split())
assert "Invalid number of frequencies specified" in str(excinfo.value)
# def test_dumpdyncpds_reb(self):
# """Test dumping rebinned CPDS file."""
# command = '--noplot ' + \
# os.path.join(self.datadir,
# 'monol_test_E3-50_cpds_rebin1.03') + \
# HEN_FILE_EXTENSION
# hen.fspec.dumpdyn_main(command.split())
def test_savexspec(self):
"""Test save as Xspec 1."""
command = '{0}'.format(
os.path.join(self.datadir, 'monol_testA_E3-50_pds_rebin2') +
HEN_FILE_EXTENSION)
hen.save_as_xspec.main(command.split())
        assert os.path.exists(os.path.join(self.datadir,
                                           'monol_testA_E3-50_pds_rebin2.pha'))
def test_savexspec_geom(self):
"""Test save as Xspec 2."""
command = '{0}'.format(
os.path.join(self.datadir, 'monol_test_E3-50_cpds_rebin1.03') +
HEN_FILE_EXTENSION)
hen.save_as_xspec.main(command.split())
        assert os.path.exists(os.path.join(self.datadir,
                                           'monol_test_E3-50_cpds_rebin1.03.pha'))
        assert os.path.exists(
            os.path.join(self.datadir,
                         'monol_test_E3-50_cpds_rebin1.03_lags.pha'))
def test_create_gti(self):
"""Test creating a GTI file."""
fname = os.path.join(self.datadir, 'monol_testA_E3-50_lc_rebin4') + \
HEN_FILE_EXTENSION
command = "{0} -f counts>0 -c --debug".format(fname)
hen.create_gti.main(command.split())
def test_apply_gti(self):
"""Test applying a GTI file."""
fname = os.path.join(self.datadir, 'monol_testA_E3-50_rebin4_gti') + \
HEN_FILE_EXTENSION
lcfname = os.path.join(self.datadir, 'monol_testA_E3-50_lc') + \
HEN_FILE_EXTENSION
lcoutname = os.path.join(self.datadir,
'monol_testA_E3-50_lc_gtifilt') + \
HEN_FILE_EXTENSION
command = "{0} -a {1} --debug".format(lcfname, fname)
hen.create_gti.main(command.split())
hen.io.load_lcurve(lcoutname)
def test_create_gti_and_minlen(self):
"""Test creating a GTI file and apply minimum length."""
fname = os.path.join(self.datadir, 'monol_testA_E3-50_lc_rebin4') + \
HEN_FILE_EXTENSION
command = "{0} -f counts>0 -c -l 10 --debug".format(fname)
hen.create_gti.main(command.split())
def test_create_gti_and_apply(self):
"""Test applying a GTI file and apply minimum length."""
fname = os.path.join(self.datadir, 'monol_testA_E3-50_rebin4_gti') + \
HEN_FILE_EXTENSION
lcfname = os.path.join(self.datadir, 'monol_testA_E3-50_lc') + \
HEN_FILE_EXTENSION
command = "{0} -a {1} -l 10 --debug".format(lcfname, fname)
hen.create_gti.main(command.split())
def test_readfile(self):
"""Test reading and dumping a HENDRICS file."""
fname = os.path.join(self.datadir, 'monol_testA_E3-50_rebin4_gti') + \
HEN_FILE_EXTENSION
command = "{0}".format(fname)
hen.io.main(command.split())
def test_readfile_fits(self):
"""Test reading and dumping a FITS file."""
fitsname = os.path.join(self.datadir, 'monol_testA.evt')
command = "{0}".format(fitsname)
hen.io.main(command.split())
def test_save_as_qdp(self):
"""Test saving arrays in a qdp file."""
arrays = [
|
np.array([0, 1, 3])
|
numpy.array
|
#!/usr/bin/env python3
""" Positioning visualizer tool """
import argparse
import json
import os
import queue
import re
import random
import sys
import time
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
import numpy as np
from PIL import Image
import paho.mqtt.client as mqtt
WINDOW_WIDTH = 1024 # * 2
WINDOW_HEIGHT = 768 # * 2
MAX_NUM_TAGS = 300
PLOT_VIEW_3D = 1 # if 0, uses 2D view
PLOT_TAG_ADDRESSES = 1 # show tag address next to marker
PLOT_ROOM = 0 # only available in 3D view
PLOT_DEBUG_LINES = 1 # only available in 3D view
PLOT_DEBUG_LOCATORS = 1 # only available in 3D view
PLOT_MARKER_TRACES = 0 # only available in 3D view
MAX_NUM_TAG_LINES = 2 # if PLOT_DEBUG_LINES == 1, show lines to this many tags
MAX_NUM_LOCATOR_LINES = 5 # if PLOT_DEBUG_LINES == 1, show lines from this many locators
DEFAULT_CONFIG = os.path.join(os.path.dirname(__file__), "../positioning/config/positioning_config.json")
DEFAULT_CONNECTION = {"host": "localhost", "port": 1883}
pg.setConfigOptions(enableExperimental=True, useOpenGL=True)
def Rx(theta):
return np.array([[ 1, 0 , 0 ],
[ 0, np.cos(theta),-np.sin(theta)],
[ 0, np.sin(theta), np.cos(theta)]])
def Ry(theta):
return np.array([[ np.cos(theta), 0, np.sin(theta)],
[ 0 , 1, 0 ],
[-np.sin(theta), 0, np.cos(theta)]])
def Rz(theta):
return np.array([[ np.cos(theta), -np.sin(theta), 0 ],
[ np.sin(theta), np.cos(theta) , 0 ],
[ 0 , 0 , 1 ]])
class TextGLViewWidget(gl.GLViewWidget):
def __init__(self):
super(TextGLViewWidget, self).__init__()
self.textPositions = {} # Key shall be the text to be displayed and value shall be [x, y, z]
if "renderText" not in dir(self):
print("WARNING! Text rendering is not supported in 3D mode. Please try to use pyqtgraph 0.11.0")
def paintGL(self, *args, **kwds):
gl.GLViewWidget.paintGL(self, *args, **kwds)
for text, pos in self.textPositions.items():
try:
self.renderText(pos[0], pos[1], pos[2], text)
            except Exception:
                # renderText may be missing or fail on some pyqtgraph/OpenGL
                # combinations (see the warning in __init__); skip this label.
                pass
def setText(self, text, pos):
self.textPositions[text] = pos
class Visualizer(object):
def __init__(self):
self.marker_trace = False
if PLOT_MARKER_TRACES:
self.marker_trace = True
self.markerTraces = []
self.markerTraceStep = 10
self.numMarkerTraces = 20
self.numEstimatesPlotted = 0
if PLOT_VIEW_3D:
self.plotlines = PLOT_DEBUG_LINES
else:
self.plotlines = 0
self.app = QtGui.QApplication(sys.argv)
self.w = None
self.view = None
if PLOT_VIEW_3D:
self.w = TextGLViewWidget()
self.w.opts['distance'] = 25
self.w.setGeometry(200, 200, WINDOW_WIDTH, WINDOW_HEIGHT)
self.w.orbit(225, 90)
self.w.show()
planeColor = [226.0 / 255.0, 205.0 / 255.0, 155.0 / 255.0, 0.5]
z = np.zeros((20, 20))
p1 = gl.GLSurfacePlotItem(z=z, shader='shaded', color=planeColor, glOptions='additive')
p1.translate(-10, -10, 0)
self.w.addItem(p1)
xgrid = gl.GLGridItem(glOptions='additive')
xgrid.rotate(90, 0, 1, 0)
ygrid = gl.GLGridItem(glOptions='additive')
ygrid.rotate(90, 1, 0, 0)
zgrid = gl.GLGridItem(glOptions='additive')
# self.w.addItem(xgrid)
# self.w.addItem(ygrid)
self.w.addItem(zgrid)
axis = gl.GLAxisItem()
axis.setSize(x=15, y=15, z=15)
self.w.addItem(axis)
if PLOT_DEBUG_LOCATORS:
im = Image.open(r"img/locator.png")
self.img_tex_front = np.array(im)
imB = Image.open(r"img/locator_back.png")
self.img_tex_back = texB = np.array(imB)
self.img_scale = 4000.0
else:
# PLOT_VIEW_2D
self.view = pg.PlotWidget()
self.view.showGrid(x=True, y=True)
self.view.resize(WINDOW_WIDTH, WINDOW_HEIGHT)
self.view.show()
self.view.setXRange(-15, 15)
self.view.setYRange(-15, 15)
self.q_pos = queue.Queue()
self.q_ang = queue.Queue()
self.tags = {}
self.locators = {}
self.positioning_id = None
def start(self):
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
def plot_room(self):
# Only supported in 3D mode
if PLOT_VIEW_3D:
# This is an example on how to plot simple objects into the 3D view
vertexes = np.array([[1, 0, 0], #0
[0, 0, 0], #1
[0, 1, 0], #2
[0, 0, 1], #3
[1, 1, 0], #4
[1, 1, 1], #5
[0, 1, 1], #6
[1, 0, 1]], dtype=int)#7
faces = np.array([[1,0,7], [1,3,7],
[1,2,4], [1,0,4],
[1,2,6], [1,3,6],
[0,4,5], [0,7,5],
[2,4,5], [2,6,5],
[3,6,5], [3,7,5]])
shelfColor = [226.0 / 255.0, 205.0 / 255.0, 155.0 / 255.0, 0.5]
tableColor = [100.0 / 255.0, 100.0 / 255.0, 100.0 / 255.0, 0.5]
md = gl.MeshData(vertexes=vertexes, faces=faces)
# shelf
self.m1 = gl.GLMeshItem(meshdata=md, smooth=True, color=shelfColor, glOptions='translucent')
self.m1.translate(1.25, -5.9, 0.0)
self.m1.scale(3.20, 0.57, 1.26)
self.w.addItem(self.m1)
# table
self.m16 = gl.GLMeshItem(meshdata=md, smooth=True, color=tableColor, glOptions='translucent')
self.m16.translate(1.27, -4.5, 0.7)
self.m16.scale(2.0, 0.8, 0.05)
self.w.addItem(self.m16)
def plot_locator(self, loc_id):
if (PLOT_VIEW_3D != 1) or (PLOT_DEBUG_LOCATORS != 1):
return
# Draw front image
front = gl.GLImageItem(self.img_tex_front, glOptions='translucent')
front.translate(-self.img_tex_front.shape[0] / (2.0 * self.img_scale), -self.img_tex_front.shape[1] / (2.0 * self.img_scale), 0.005)
front.scale(1.0 / self.img_scale, 1.0 / self.img_scale, 1.0 / self.img_scale)
front.rotate(self.locators[loc_id]["orientation"]["z"], x=0, y=0, z=1, local=True)
front.rotate(self.locators[loc_id]["orientation"]["y"], x=0, y=1, z=0, local=True)
front.rotate(self.locators[loc_id]["orientation"]["x"], x=1, y=0, z=0, local=True)
front.translate(self.img_tex_front.shape[0] / (2.0 * self.img_scale), self.img_tex_front.shape[1] / (2.0 * self.img_scale), -0.005)
front.translate(self.locators[loc_id]["coordinate"]["x"], self.locators[loc_id]["coordinate"]["y"], self.locators[loc_id]["coordinate"]["z"])
front.translate(-self.img_tex_front.shape[0] / (2.0), -self.img_tex_front.shape[1] / (2.0), 0.005 * self.img_scale, local=True)
self.w.addItem(front)
self.locators[loc_id]["front"] = front
# Draw back image
back = gl.GLImageItem(self.img_tex_back, glOptions='translucent')
back.translate(-self.img_tex_back.shape[0] / (2.0 * self.img_scale), -self.img_tex_back.shape[1] / (2.0 * self.img_scale), -0.005)
back.scale(1.0 / self.img_scale, 1.0 / self.img_scale, 1.0 / self.img_scale)
back.rotate(self.locators[loc_id]["orientation"]["z"], x=0, y=0, z=1, local=True)
back.rotate(self.locators[loc_id]["orientation"]["y"], x=0, y=1, z=0, local=True)
back.rotate(self.locators[loc_id]["orientation"]["x"], x=1, y=0, z=0, local=True)
back.translate(self.img_tex_back.shape[0] / (2.0 * self.img_scale), self.img_tex_back.shape[1] / (2.0 * self.img_scale), 0.005)
back.translate(self.locators[loc_id]["coordinate"]["x"], self.locators[loc_id]["coordinate"]["y"], self.locators[loc_id]["coordinate"]["z"])
back.translate(-self.img_tex_back.shape[0] / (2.0), -self.img_tex_back.shape[1] / (2.0), -0.005 * self.img_scale, local=True)
self.w.addItem(back)
self.locators[loc_id]["back"] = back
def plot_line(self, tag_id, loc_id):
if (self.tags[tag_id]["sequence_nr"] >= MAX_NUM_TAG_LINES) or (self.locators[loc_id]["sequence_nr"] >= MAX_NUM_LOCATOR_LINES):
return
loc_x = self.locators[loc_id]["coordinate"]["x"]
loc_y = self.locators[loc_id]["coordinate"]["y"]
loc_z = self.locators[loc_id]["coordinate"]["z"]
rot_x = np.radians(self.locators[loc_id]["orientation"]["x"])
rot_y = np.radians(self.locators[loc_id]["orientation"]["y"])
rot_z = np.radians(self.locators[loc_id]["orientation"]["z"])
azimuth = np.radians(self.tags[tag_id]["angle"][loc_id]["azimuth"])
elevation =
|
np.radians(self.tags[tag_id]["angle"][loc_id]["elevation"])
|
numpy.radians
|
#!/usr/bin/env python
# coding: utf-8
# # COWS example
#
# This Jupyter Notebook is available [here](https://github.com/SimonPfeifer/cows/blob/master/docs/example.ipynb) and the Python .py file [here](https://github.com/SimonPfeifer/cows/blob/master/docs/example.py).
# In[1]:
import cows
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
def thick_slice(data, z, dz, operator='and'):
'''Returns a slice of the data at z
with thickness dz.
'''
zmin = int(z-np.round(dz/2))
zmax = int(z+np.round(dz/2))
if operator == 'and':
return np.any(data[zmin:zmax], axis=0).astype(int)
    elif operator == 'sum':
return np.sum(data[zmin:zmax], axis=0)
else:
raise ValueError('Invalid operator: {}'.format(operator))
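# Illustrative usage sketch (not part of the original notebook; assumes the `data` cube
# loaded in the next cell): plot a 5-cell-thick slab through the middle of the volume,
# marking cells classified as filaments (value 2).
#   filament_slab = thick_slice(data == 2, z=32, dz=5, operator='and')
#   plt.imshow(filament_slab, origin='lower')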
# In[3]:
# Load the test data. It consists of a 64x64x64 cube of V-web data, a cosmic web classifier.
# In the context of the V-web, the values are:
# 0 - voids
# 1 - sheets
# 2 - filaments
# 3 - knots
data =
|
np.load('../tests/test_data.npy')
|
numpy.load
|
"""
Unit tests for the spline interpolator component.
"""
from copy import deepcopy
import unittest
import numpy as np
from openmdao.components.interp_util.interp import InterpND, SPLINE_METHODS, TABLE_METHODS
from openmdao.components.interp_util.interp_semi import InterpNDSemi, INTERP_METHODS
from openmdao.components.interp_util.outofbounds_error import OutOfBoundsError
from openmdao.utils.assert_utils import assert_near_equal, assert_equal_arrays
def rel_error(actual, computed):
return np.linalg.norm(actual - computed) / np.linalg.norm(actual)
scipy_gte_019 = True
try:
from scipy.interpolate._bsplines import make_interp_spline
except ImportError:
scipy_gte_019 = False
class InterpNDStandaloneFeatureTestcase(unittest.TestCase):
def test_interp_spline_akima(self):
xcp = np.array([1.0, 2.0, 4.0, 6.0, 10.0, 12.0])
ycp = np.array([5.0, 12.0, 14.0, 16.0, 21.0, 29.0])
n = 50
x = np.linspace(1.0, 12.0, n)
interp = InterpND(method='akima', points=xcp, x_interp=x, delta_x=0.1)
y = interp.evaluate_spline(ycp)
assert_near_equal(y,
np.array([ 5. , 7.20902005, 9.21276849, 10.81097162, 11.80335574,
12.1278001 , 12.35869145, 12.58588536, 12.81022332, 13.03254681,
13.25369732, 13.47451633, 13.69584534, 13.91852582, 14.14281484,
14.36710105, 14.59128625, 14.81544619, 15.03965664, 15.26399335,
15.48853209, 15.7133486 , 15.93851866, 16.16573502, 16.39927111,
16.63928669, 16.8857123 , 17.1384785 , 17.39751585, 17.66275489,
17.93412619, 18.21156029, 18.49498776, 18.78433915, 19.07954501,
19.38053589, 19.68724235, 19.99959495, 20.31752423, 20.64096076,
20.96983509, 21.37579297, 21.94811407, 22.66809748, 23.51629844,
24.47327219, 25.51957398, 26.63575905, 27.80238264, 29. ]),
tolerance=1e-6)
def test_interp_spline_akima_derivs(self):
xcp = np.array([1.0, 2.0, 4.0, 6.0, 10.0, 12.0])
ycp = np.array([5.0, 12.0, 14.0, 16.0, 21.0, 29.0])
n = 5
x = np.linspace(1.0, 12.0, n)
interp = InterpND(method='akima', points=xcp, x_interp=x, delta_x=0.1)
y, dy_dycp = interp.evaluate_spline(ycp, compute_derivative=True)
assert_near_equal(dy_dycp,
np.array([[ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[-1.86761492e-06, 3.31278014e-02, 1.05874907e+00,
-9.18750000e-02, 0.00000000e+00, 0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, -2.10964627e-01,
1.19119941e+00, 2.02602810e-02, -4.95062934e-04],
[ 0.00000000e+00, 0.00000000e+00, -2.64126732e-01,
5.82784977e-01, 6.83151998e-01, -1.81024253e-03],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
tolerance=1e-6)
def test_interp_spline_bsplines(self):
xcp = np.array([1.0, 2.0, 4.0, 6.0, 10.0, 12.0])
ycp = np.array([5.0, 12.0, 14.0, 16.0, 21.0, 29.0])
n = 50
x = np.linspace(1.0, 12.0, n)
interp = InterpND(method='bsplines', num_cp=6, x_interp=x)
y = interp.evaluate_spline(ycp)
assert_near_equal(y,
np.array([ 9.21614583, 9.90911525, 10.52244151, 11.06231159, 11.53491244,
11.94643105, 12.30305438, 12.61096939, 12.87636305, 13.10542234,
13.30433422, 13.47928566, 13.63646363, 13.7820551 , 13.92203064,
14.05954727, 14.19579437, 14.33192094, 14.46907599, 14.60840854,
14.75106758, 14.89820214, 15.05096121, 15.2104938 , 15.37794893,
15.5544756 , 15.74122282, 15.9393396 , 16.14997495, 16.37427787,
16.61339737, 16.86848247, 17.14102103, 17.43486416, 17.75486932,
18.10589772, 18.49281052, 18.92046894, 19.39373414, 19.91746734,
20.4965297 , 21.13578243, 21.8400867 , 22.61430372, 23.46329467,
24.39192074, 25.40504312, 26.507523 , 27.70422156, 29. ]),
tolerance=1e-6)
def test_table_interp(self):
# create input param training data, of sizes 25, 5, and 10 points resp.
p1 = np.linspace(0, 100, 25)
p2 = np.linspace(-10, 10, 5)
p3 = np.linspace(0, 1, 10)
# can use meshgrid to create a 3D array of test data
P1, P2, P3 = np.meshgrid(p1, p2, p3, indexing='ij')
f = np.sqrt(P1) + P2 * P3
interp = InterpND(method='lagrange3', points=(p1, p2, p3), values=f)
x = np.array([55.12, -2.14, 0.323])
f, df_dx = interp.interpolate(x, compute_derivative=True)
actual = np.array([6.73306794])
deriv_actual = np.array([[ 0.06734927, 0.323 , -2.14]])
assert_near_equal(f, actual, tolerance=1e-7)
assert_near_equal(df_dx, deriv_actual, tolerance=1e-7)
def test_cs_across_interp(self):
# The standalone interpolator is used inside of components, so the imaginary part must
# be carried through all operations to the outputs.
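        # For reference (illustrative numbers, not part of the original test): the
        # complex-step derivative approximation that relies on this property is
        #   h = 1e-30
        #   dfdx = np.imag(np.sin(1.0 + 1j * h)) / h   # ~= np.cos(1.0) to machine precision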
xcp = np.array([1.0, 2.0, 4.0, 6.0, 10.0, 12.0])
ycp = np.array([5.0, 12.0, 14.0, 16.0, 21.0, 29.0])
n = 50
x = np.linspace(1.0, 12.0, n)
ycp = np.array([[5.0 + 1j, 12.0, 14.0, 16.0, 21.0, 29.0],
[5.0, 12.0 + 1j, 14.0, 16.0, 21.0, 29.0]])
for method in SPLINE_METHODS:
# complex step not supported on scipy methods
if method.startswith('scipy'):
continue
interp = InterpND(method=method, points=xcp, x_interp=x)
y, dy = interp.evaluate_spline(ycp, compute_derivative=True)
self.assertTrue(y.dtype == complex)
if method in ['akima']:
# Derivs depend on values only for akima.
self.assertTrue(dy.dtype == complex)
p1 = np.linspace(0, 100, 25)
p2 = np.linspace(-10, 10, 5)
p3 = np.linspace(0, 1, 10)
# can use meshgrid to create a 3D array of test data
P1, P2, P3 = np.meshgrid(p1, p2, p3, indexing='ij')
f = np.sqrt(P1) + P2 * P3
x = np.array([[55.12 + 1j, -2.14, 0.323],
[55.12, -2.14 + 1j, 0.323],
[55.12, -2.14, 0.323 + 1j]])
for method in TABLE_METHODS:
# complex step not supported on scipy methods
if method.startswith('scipy'):
continue
interp = InterpND(method=method, points=(p1, p2, p3), values=f)
y, dy = interp.interpolate(x, compute_derivative=True)
self.assertTrue(y.dtype == complex)
self.assertTrue(dy.dtype == complex)
class TestInterpNDSemiPython(unittest.TestCase):
"""Tests for the standalone semi structured interp."""
def setUp(self):
self.interp_configs = {
"slinear": 2,
"lagrange2": 3,
"lagrange3": 4,
"akima": 4,
}
self.interp_methods = self.interp_configs.keys()
self.tol = {
"slinear": 5e-2,
"lagrange2": 5e-2,
"lagrange3": 1e-4,
"akima": 1e-3,
}
def _get_sample_2d(self):
# test problem with enough points for smooth spline fits
def f(u, v):
return u * np.cos(u * v) + v * np.sin(u * v)
def df(u, v):
return (-u * v * np.sin(u * v) + v**2 * np.cos(u * v) +
np.cos(u * v),
-u**2 * np.sin(u * v) + u * v * np.cos(u * v) +
np.sin(u * v))
# uniformly spaced axis
u = np.linspace(0, 3, 50)
# randomly spaced axis
np.random.seed(7590)
v = np.random.uniform(0, 3, 50)
v.sort()
points = [u, v]
values = f(*np.meshgrid(*points, indexing='ij'))
return points, values, f, df
def _get_sample_4d_large(self):
def f(x, y, z, w):
return x**2 + y**2 + z**2 + w**2
X = np.linspace(-10, 10, 6)
Y = np.linspace(-10, 10, 7)
np.random.seed(0)
Z = np.random.uniform(-10, 10, 6)
Z.sort()
W = np.linspace(-10, 10, 8)
points = [X, Y, Z, W]
values = f(*np.meshgrid(*points, indexing='ij'))
return points, values
def test_2d(self):
# test interpolated values
points, values, func, df = self._get_sample_2d()
np.random.seed(1)
X, Y = np.meshgrid(*points, indexing='ij')
X = X.ravel()
Y = Y.ravel()
grid = np.array([X, Y]).T
test_pt = np.random.uniform(0, 3, 2)
actual = func(*test_pt)
for method in self.interp_methods:
interp = InterpNDSemi(grid, values.ravel(), method=method)
computed = interp.interpolate(test_pt)
r_err = rel_error(actual, computed)
assert r_err < self.tol[method]
def test_minimum_required_gridsize(self):
for method in self.interp_methods:
k = self.interp_configs[method] - 1
x = np.linspace(0, 1, k)
y = np.linspace(0, 1, k)
points = [x, y]
X, Y = np.meshgrid(*points, indexing='ij')
X = X.ravel()
Y = Y.ravel()
values = X + Y
grid = np.array([X, Y]).T
with self.assertRaises(ValueError) as cm:
interp = InterpNDSemi(grid, values, method=method)
msg = 'There are {} points in a data dimension, but method'.format(k)
self.assertTrue(str(cm.exception).startswith(msg))
def test_NaN_exception(self):
np.random.seed(1234)
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
points = [x, y]
X, Y = np.meshgrid(*points, indexing='ij')
X = X.ravel()
Y = Y.ravel()
grid = np.array([X, Y]).T
values = np.random.rand(5, 7).ravel()
interp = InterpNDSemi(grid, values, method='slinear', extrapolate=False)
with self.assertRaises(OutOfBoundsError) as cm:
interp.interpolate(np.array([1, np.nan]))
err = cm.exception
self.assertEqual(str(err), 'One of the requested xi contains a NaN')
self.assertEqual(err.idx, 1)
self.assertTrue(np.isnan(err.value))
def test_error_messages(self):
points, values = self._get_sample_4d_large()
X, Y, Z, W = np.meshgrid(*points, indexing='ij')
X = X.ravel()
Y = Y.ravel()
Z = Z.ravel()
W = W.ravel()
grid = np.array([X, Y, Z, W]).T
values = values.ravel()
with self.assertRaises(ValueError) as cm:
interp = InterpNDSemi(grid, values, method='junk')
msg = ('Interpolation method "junk" is not defined. Valid methods are')
self.assertTrue(cm.exception.args[0].startswith(msg))
with self.assertRaises(ValueError) as cm:
interp = InterpNDSemi(grid, values, method=points)
msg = ("Argument 'method' should be a string.")
self.assertTrue(cm.exception.args[0].startswith(msg))
with self.assertRaises(ValueError) as cm:
interp = InterpNDSemi(grid, values[:-1], method='slinear')
msg = ('There are 2016 point arrays, but 2015 values.')
self.assertEqual(cm.exception.args[0], msg)
badgrid = deepcopy(grid)
badgrid[0][0] = -6.0
with self.assertRaises(ValueError) as cm:
interp = InterpNDSemi(badgrid, values, method='slinear')
msg = ('The points in dimension 0 must be strictly ascending.')
self.assertEqual(cm.exception.args[0], msg)
with self.assertRaises(KeyError) as cm:
interp = InterpNDSemi(grid, values, method='slinear', bad_arg=1)
msg = ("Option 'bad_arg' cannot be set because it has not been declared.")
self.assertTrue(cm.exception.args[0].endswith(msg))
class TestInterpNDPython(unittest.TestCase):
"""Tests for the non-scipy interpolation algorithms."""
def setUp(self):
self.interp_configs = {
"slinear": 2,
"cubic": 3,
"lagrange2": 3,
"lagrange3": 4,
"akima": 4,
"scipy_slinear": 1,
"scipy_cubic": 3,
"scipy_quintic": 5,
}
self.spline_configs = {
"slinear": 2,
"cubic": 3,
"lagrange2": 3,
"lagrange3": 4,
"akima": 4,
"bsplines": 4,
"scipy_slinear": 1,
"scipy_cubic": 3,
"scipy_quintic": 5,
}
self.interp_methods = self.interp_configs.keys()
self.spline_methods = self.spline_configs.keys()
self.tol = {
"slinear": 5e-2,
"lagrange2": 5e-2,
"lagrange3": 1e-4,
"cubic": 1e-4,
"akima": 1e-3,
"bsplines": 1e-1,
"scipy_slinear": 5e-2,
"scipy_cubic": 1e-4,
"scipy_quintic": 1e-6,
}
def _get_sample_1d(self):
# test problem with enough points for smooth spline fits
def f(u):
return 2.0 * np.cos(u)
def df(u):
return -2.0 * np.sin(u)
# uniformly spaced axis
u = np.linspace(0, 3, 50)
points = [u]
values = f(u)
return points, values, f, df
def _get_sample_2d(self):
# test problem with enough points for smooth spline fits
def f(u, v):
return u * np.cos(u * v) + v * np.sin(u * v)
def df(u, v):
return (-u * v * np.sin(u * v) + v**2 * np.cos(u * v) +
np.cos(u * v),
-u**2 * np.sin(u * v) + u * v * np.cos(u * v) +
np.sin(u * v))
# uniformly spaced axis
u = np.linspace(0, 3, 50)
# randomly spaced axis
np.random.seed(7590)
v = np.random.uniform(0, 3, 50)
v.sort()
points = [u, v]
values = f(*np.meshgrid(*points, indexing='ij'))
return points, values, f, df
def _get_sample_4d_large(self):
def f(x, y, z, w):
return x**2 + y**2 + z**2 + w**2
X = np.linspace(-10, 10, 6)
Y = np.linspace(-10, 10, 7)
np.random.seed(0)
Z = np.random.uniform(-10, 10, 6)
Z.sort()
W = np.linspace(-10, 10, 8)
points = [X, Y, Z, W]
values = f(*np.meshgrid(*points, indexing='ij'))
return points, values
def test_minimum_required_gridsize(self):
for method in self.interp_methods:
# Scipy does order reduction as needed.
if method.startswith('scipy'):
continue
k = self.interp_configs[method] - 1
x = np.linspace(0, 1, k)
y = np.linspace(0, 1, k)
points = [x, y]
X, Y = np.meshgrid(*points, indexing='ij')
values = X + Y
#self.assertRaises(ValueError, InterpND, points, values, method)
with self.assertRaises(ValueError) as cm:
interp = InterpND(method=method, points=points, values=values)
msg = 'There are {} points in a data dimension, but method'.format(k)
self.assertTrue(str(cm.exception).startswith(msg))
def test_spline_single_dim(self):
# test interpolated values
points, values, func, df = self._get_sample_1d()
test_pt = np.array([[0.76], [.33]])
actual = func(test_pt).flatten()
for method in self.interp_methods:
interp = InterpND(method=method, points=points, values=values)
computed = interp.interpolate(test_pt)
r_err = rel_error(actual, computed)
assert r_err < self.tol[method]
def test_spline_xi1d(self):
# test interpolated values
points, values, func, df = self._get_sample_2d()
np.random.seed(1)
test_pt = np.random.uniform(0, 3, 2)
actual = func(*test_pt)
for method in self.interp_methods:
interp = InterpND(method=method, points=points, values=values)
computed = interp.interpolate(test_pt)
r_err = rel_error(actual, computed)
assert r_err < self.tol[method]
def test_spline_out_of_bounds_extrap(self):
        points, values, func, df = self._get_sample_2d()
np.random.seed(5)
test_pt = np.random.uniform(3, 3.1, 2)
actual = func(*test_pt)
gradient = np.array(df(*test_pt))
tol = 1e-1
for method in self.interp_methods:
k = self.interp_configs[method]
if method == 'slinear':
tol = 2
interp = InterpND(method=method, points=points, values=values,
extrapolate=True)
computed, computed_grad = interp.interpolate(test_pt, compute_derivative=True)
computed_grad = interp.gradient(test_pt)
r_err = rel_error(actual, computed)
assert r_err < tol
r_err = rel_error(gradient, computed_grad)
# extrapolated gradients are even trickier, but usable still
assert r_err < 2 * tol
def test_spline_xi3d(self):
        points, values, func, df = self._get_sample_2d()
np.random.seed(1)
test_pt = np.random.uniform(0, 3, 6).reshape(3, 2)
actual = func(*test_pt.T)
for method in self.interp_methods:
interp = InterpND(method=method, points=points, values=values)
computed = interp.interpolate(test_pt)
r_err = rel_error(actual, computed)
#print(method, computed, actual, r_err)
assert r_err < self.tol[method]
def test_spline_xi3d_akima_delta_x(self):
        points, values, func, df = self._get_sample_2d()
np.random.seed(1)
test_pt = np.random.uniform(0, 3, 6).reshape(3, 2)
actual = func(*test_pt.T)
interp = InterpND(method='akima', points=points, values=values, delta_x=0.01)
computed = interp.interpolate(test_pt)
r_err = rel_error(actual, computed)
#print('akima', computed, actual, r_err)
assert r_err < self.tol['akima']
def test_spline_deriv_xi1d(self):
# tests gradient values
points, values, func, df = self._get_sample_2d()
np.random.seed(1234)
test_pt = np.random.uniform(0, 3, 2)
actual = np.array(df(*test_pt))
for method in self.interp_methods:
interp = InterpND(method=method,points=points, values=values)
computed = interp.gradient(test_pt)
r_err = rel_error(actual, computed)
assert r_err < 2.5 * self.tol[method]
# test that gradients have been cached
assert_equal_arrays(interp._xi.flatten(), test_pt.flatten())
assert_equal_arrays(interp._d_dx.flatten(), computed.flatten())
def test_gradients_returned_by_xi(self):
# verifies that gradients with respect to xi are returned if cached
points, values, func, df = self._get_sample_2d()
np.random.seed(4321)
for method in self.interp_methods:
interp = InterpND(method=method, points=points, values=values)
x = np.array([0.9, 0.1])
interp._xi = x
dy = np.array([0.997901, 0.08915])
interp._d_dx = dy
assert_near_equal(interp.gradient(x), dy, tolerance=1e-7)
def test_akima_interpolating_spline(self):
n_cp = 80
n_point = 160
t = np.linspace(0, 3.0*np.pi, n_cp)
tt = np.linspace(0, 3.0*np.pi, n_point)
x =
|
np.sin(t)
|
numpy.sin
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 22:46:20 2020
@author: zinan
"""
import numpy as np
from numpy import linalg as LA
from BFSVM_class import Kernel
from BFSVM_class import precision
from imblearn.over_sampling import SVMSMOTE
import math
from sklearn.model_selection import train_test_split
"""
Least Squares Fuzzy SVM (LS-FSVM)
The training problem reduces to a linear system, solved with numpy.linalg.
Parameters
C: penalty
kernel_dict :
'type': 'LINEAR' / 'RBF' 'sigma' / 'POLY' 'd'
fuzzyvalue:
membership value based on the distance to the class center
'type': 'Cen'
'function' : 'Lin' / 'Exp'
membership value based on the distance to the actual hyperplane
'type': 'Hyp'
'function' : 'Lin' / 'Exp'
r_max : ratio between 0 and 1
r_min : ratio between 0 and 1, used to balance the data;
usually r = len(y_minority)/len(y_majority) for the majority class
and r = 1 for the minority class
Methods
_mvalue(self, X, y)
Calculate the fuzzy membership values.
fit(self, X, Y)
Fit the model according to the given training data.
predict(self, X)
Predict class labels for samples in X.
Platt_Probabilistic(self, deci, label, prior1, prior0)
Calculate the position parameter (B) and the scale parameter (A) of the
posterior class probability Pr(y = 1|x) = 1/(1+exp(Af+B)).
predict_prob(self, X)
Posterior class probability Pr(y = 1|x).
decision_function(self, X)
Predict confidence scores for samples.
The confidence score for a sample is the signed distance of that sample to the hyperplane.
"""
class LSFSVM:
def __init__(
self,
C=3,
kernel_dict={"type": "LINEAR"},
fuzzyvalue={"type": "Cen", "function": "Lin"},
databalance="origine",
r_max=1,
r_min=1,
):
self.C = C
self.kernel_dict = kernel_dict
self.fuzzyvalue = fuzzyvalue
self.r_max = r_max
self.r_min = r_min
self.databalance = databalance
# self.m_value = None
# self.alpha = None
# self.b = None
# self.K = None
def _mvalue(self, X, y):
# print('fuzzy value:', self.fuzzyvalue )
train_data = np.append(X, y.reshape(len(y), 1), axis=1)
if self.databalance == "LowSampling":
            data_maj = train_data[y == 1]  # majority-class samples (to be down-sampled)
data_min = train_data[y != 1]
index = np.random.randint(len(data_maj), size=len(data_min))
lower_data_maj = data_maj[list(index)]
train_data = np.append(lower_data_maj, data_min, axis=0)
X = train_data[:, :-1]
y = train_data[:, -1]
elif self.databalance == "UpSampling":
X, y = SVMSMOTE(random_state=42).fit_sample(
train_data[:, :-1], np.asarray(train_data[:, -1])
)
else:
X = X
y = y
if self.fuzzyvalue["type"] == "Cen":
x_1 = X[y == 1]
x_0 = X[y == -1]
x_centre_1 = np.mean(x_1, axis=0)
x_centre_0 = np.mean(x_0, axis=0)
max_distance_1 = 0
max_distance_0 = 0
for i in range(len(x_1)):
distance = LA.norm(x_centre_1 - x_1[i, :])
if max_distance_1 < distance:
max_distance_1 = distance
for i in range(len(x_0)):
distance = LA.norm(x_centre_0 - x_0[i, :])
if max_distance_0 < distance:
max_distance_0 = distance
memership = []
if self.fuzzyvalue["function"] == "Lin":
for i in range(len(y)):
if y[i] == 1:
memership.append(
(1 - LA.norm(X[i] - x_centre_1) / (max_distance_1 + 0.0001))
* self.r_max
)
if y[i] == -1:
memership.append(
(1 - LA.norm(X[i] - x_centre_0) / (max_distance_0 + 0.0001))
* self.r_min
)
elif self.fuzzyvalue["function"] == "Exp":
for i in range(len(y)):
if y[i] == 1:
memership.append(
(2 / (1 + np.exp(LA.norm(X[i] - x_centre_1)))) * self.r_max
)
if y[i] == -1:
memership.append(
(2 / (1 + np.exp(LA.norm(X[i] - x_centre_0)))) * self.r_min
)
elif self.fuzzyvalue["type"] == "Hyp":
m = y.shape[0]
C = 3
gamma = 1
# Kernel
K = Kernel.RBF(m, gamma)
K.calculate(X)
H = np.multiply(np.dot(np.matrix(y).T, np.matrix(y)), K.kernelMat)
M_BR = H + np.eye(m) / C
# Concatenate
L_L = np.concatenate((np.matrix(0), np.matrix(y).T), axis=0)
L_R = np.concatenate((np.matrix(y), M_BR), axis=0)
L = np.concatenate((L_L, L_R), axis=1)
R = np.ones(m + 1)
R[0] = 0
# solve
b_a = LA.solve(L, R)
b = b_a[0]
alpha = b_a[1:]
K.expand(X)
A = np.multiply(alpha, y)
f = b + np.dot(K.testMat, A)
d_hyp = abs(f * y)
memership = []
if self.fuzzyvalue["function"] == "Lin":
for i in range(len(y)):
if y[i] == 1:
memership.append(
(1 - d_hyp[i] / (max(d_hyp) + 0.0001)) * self.r_max
)
if y[i] == -1:
memership.append(
(1 - d_hyp[i] / (max(d_hyp) + 0.0001)) * self.r_min
)
elif self.fuzzyvalue["function"] == "Exp":
for i in range(len(y)):
if y[i] == 1:
memership.append((2 / (1 + np.exp(d_hyp[i]))) * self.r_max)
if y[i] == -1:
memership.append((2 / (1 + np.exp(d_hyp[i]))) * self.r_min)
self.m_value = np.array(memership)
return self.m_value
def fit(self, X, Y):
# print('Kernel:', self.kernel_dict)
train_data = np.append(X, Y.reshape(len(Y), 1), axis=1)
if self.databalance == "LowSampling":
            data_maj = train_data[Y == 1]  # majority-class samples (to be down-sampled)
data_min = train_data[Y != 1]
index = np.random.randint(len(data_maj), size=len(data_min))
lower_data_maj = data_maj[list(index)]
train_data = np.append(lower_data_maj, data_min, axis=0)
X = train_data[:, :-1]
Y = train_data[:, -1]
self.Y = Y
elif self.databalance == "UpSampling":
X, Y = SVMSMOTE(random_state=42).fit_sample(
train_data[:, :-1], np.asarray(train_data[:, -1])
)
self.Y = Y
else:
X = X
Y = Y
self.Y = Y
m = len(Y)
# Kernel
if self.kernel_dict["type"] == "RBF":
K = Kernel.RBF(m, self.kernel_dict["sigma"])
K.calculate(X)
elif self.kernel_dict["type"] == "LINEAR":
K = Kernel.LINEAR(m)
K.calculate(X)
elif self.kernel_dict["type"] == "POLY":
K = Kernel.POLY(m, self.kernel_dict["d"])
K.calculate(X)
H = np.multiply(np.dot(np.matrix(Y).T, np.matrix(Y)), K.kernelMat)
M_BR = H + np.eye(m) / (self.C * (self.m_value[:, None]))
# Concatenate
L_L = np.concatenate((
|
np.matrix(0)
|
numpy.matrix
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 29 14:19:00 2016
Module:
birg - Bipartite Random Graph
Author:
<NAME>
Description:
Implementation of the Bipartite Random Graph model (BiRG).
Given a biadjacency matrix of an unweighted bipartite graph in the form of
a binary array as input, the module allows the user to create the
corresponding Bipartite Random Graph null model. The user can calculate and
save the p-values of the observed :math:`\\Lambda`-motifs of the two
distinct bipartite node sets, which correspond to the row and column
indices of the biadjacency matrix.
Usage:
Be ``mat`` a two-dimensional binary NumPy array. The nodes of the two
bipartite layers are ordered along the columns and rows, respectively. In
the algorithm, the two layers are identified by the boolean values ``True``
for the **row-nodes** and ``False`` for the **column-nodes**.
Import the module and initialize the Bipartite Random Graph model::
>>> from src.birg import BiRG
>>> rg = BiRG(bin_mat=mat)
In order to analyze the similarity of the **row-layer nodes** and to save
the p-values of the corresponding :math:`\\Lambda`-motifs, i.e. of the
number of shared neighbors [Saracco2016]_, use::
>>> rg.lambda_motifs(bip_set=True, filename=<filename>, delim='\\t',
binary=True)
For the **column-layer nodes**, use::
        >>> rg.lambda_motifs(bip_set=False, filename=<filename>, delim='\\t',
binary=True)
``bip_set`` selects the bipartite node set for which the p-values should be
calculated and saved. By default, the file is saved as a binary NumPy file
``.npy``. In order to save it as a human-readable CSV format, set
``binary=False`` in the function call.
The filename *<filename>* should contain a relative path declaration. The
default name of the output file is *p_values_<bip_set>* and ends with
``.npy`` or ``.csv`` depending on the variable ``binary``. In the CSV
format, the values are separated by tabs, which can be changed using the
``delim`` keyword.
Subsequently, the p-values can be used to perform a multiple hypotheses
testing and to obtain statistically validated monopartite projections
[Saracco2016]_. The p-values are calculated in parallel by default.
Reference:
<NAME>, <NAME>, Statistical mechanics of complex networks
Rev. Mod. Phys. 74, 47
doi:http://dx.doi.org/10.1103/RevModPhys.74.47
[Saracco2016] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Inferring monopartite projections of bipartite
networks: an entropy-based approach, arXiv preprint arXiv:1607.02481
"""
import os
import numpy as np
from scipy.stats import binom
class BiRG:
"""Bipartite Random Graph for undirected binary bipartite networks.
    This class implements the Bipartite Random Graph (BiRG), which can be used
as a null model for the analysis of undirected and binary bipartite
networks. The class provides methods to calculate the biadjacency matrix of
the null model and to quantify node similarities in terms of p-values.
"""
def __init__(self, bin_mat):
"""Initialize the parameters of the BiRG.
:param bin_mat: binary input matrix describing the biadjacency matrix
of a bipartite graph with the nodes of one layer along the rows
and the nodes of the other layer along the columns.
:type bin_mat: numpy.array
"""
self.bin_mat = np.array(bin_mat)
self.check_input_matrix_is_binary()
[self.num_rows, self.num_columns] = self.bin_mat.shape
self.num_edges = self.get_number_edges()
self.edge_prob = self.get_edge_prob()
self.lambda_prob = self.get_lambda_motif_prob()
# ------------------------------------------------------------------------------
# Initialization
# ------------------------------------------------------------------------------
def check_input_matrix_is_binary(self):
"""Check that the input matrix is binary, i.e. entries are 0 or 1.
:raise AssertionError: raise an error if the input matrix is not
binary
"""
assert np.all(np.logical_or(self.bin_mat == 0, self.bin_mat == 1)), \
"Input matrix is not binary."
def get_number_edges(self):
"""Return the number of edges encoded in the biadjacency matrix.
:returns: number of edges in the graph
:rtype: float
"""
# edges are indicated as "1" in the matrix
return self.bin_mat.sum()
def get_edge_prob(self):
"""Return the uniform edge probability of the Bipartite Random Graph.
:returns: edge probability
:rtype: float
"""
p = float(self.num_edges) / (self.num_rows * self.num_columns)
return p
def get_lambda_motif_prob(self):
"""Return the probability of a :math:`\\Lambda`-motif.
        For two nodes :math:`i, j`, the probability of the motif
:math:`\\Lambda_{ij}^{\\alpha}` is given by
.. math::
p(\\Lambda_{ij}^{\\alpha}) = p_{i\\alpha} * p_{j\\alpha}.
:returns: probability for a :math:`\\Lambda`-motif
:rtype: float
"""
pp = self.edge_prob * self.edge_prob
return pp
# ------------------------------------------------------------------------------
# Total log-likelihood of the observed Lambda motifs in the input matrix
# ------------------------------------------------------------------------------
def lambda_loglike(self, bip_set=False):
"""Return the maximum likelihood of the edge weights of the projection
on the specified bipartite set.
:param bip_set: analyze countries (True) or products (False)
:type bip_set: bool
:param write: if True, the pvalues are saved in an external file
:type write: bool
"""
lambda_mat = self.get_lambda_vals(bip_set)
p_mat = self.get_proj_pmat(lambda_mat, bip_set)
logp = np.log(p_mat[np.triu_indices_from(p_mat, k=1)])
loglike = logp.sum()
return loglike
def get_proj_pmat(self, mat, bip_set=False):
"""Return a matrix of probabilites for the observed Lambda_2 motifs in
the input matrix.
pmf(k) = Pr(X = k)
The lower triangular part (including the diagonal) of the pvalue
matrix is set to zero.
:param bip_set: selects countries (True) or products (False)
:type bip_set: bool
:param mat: matrix of observed Lambda_2 motifs
:type mat: np.array
"""
bn = self.get_binomial(bip_set)
m = bn.pmf(mat)
m[np.tril_indices_from(m, k=0)] = 0
return m
# ------------------------------------------------------------------------------
# Lambda motifs
# ------------------------------------------------------------------------------
def lambda_motifs(self, bip_set=False, filename=None, delim='\t',
binary=True):
"""Calculate and save the p-values of the :math:`\\Lambda`-motifs.
For each node couple in the bipartite layer specified by ``bip_set``,
the p-values of the corresponding :math:`\\Lambda`-motifs are
calculated based on the biadjacency matrix of the BiRG null model.
The results can be saved either as a binary or a human-readable file.
.. note::
* The output consists of one array of p-values to keep memory usage
low. If the bipartite layer ``bip_set`` contains ``n`` nodes,
this means that the array will contain :math:`\\binom{n}{2}`
entries. The indices of the nodes corresponding to entry ``k``
in the array can be reconstructed using the method
``flat2_triumat_idx(k, n)``.
* If ``binary == False``, the ``filename`` should end with
``.csv``. Otherwise, it will be saved in binary format and the
suffix ``.npy`` will be appended automatically. By default, the file
is saved in binary format.
:param bip_set: select row-nodes (``True``) or column-nodes (``False``)
:type bip_set: bool
:param filename: name of the file which will contain the p-values
:type filename: str
:param delim: delimiter between entries in file, default is tab
:type delim: str
:param binary: if ``True``, the file will be saved in the binary
NumPy format ``.npy``, otherwise as ``.csv``
:type binary: bool
"""
lambda_mat = self.get_lambda_vals(bip_set)
pval_mat = self.get_lambda_pvalues(lambda_mat, bip_set)
if filename is None:
fname = 'p_values_' + str(bip_set)
if not binary:
fname += '.csv'
else:
fname = filename
# account for machine precision:
pval_mat += np.finfo(float).eps
self.save_matrix(pval_mat, filename=fname, delim=delim, binary=binary)
def get_lambda_vals(self, bip_set=False):
"""Return an array of observed :math:`\\Lambda`-motifs.
The number of elements in the returned array ``A`` is
:math:`\\binom{N}{2}`, where :math:`N` is the number of distinct nodes
in the bipartite layer ``bip_set``. The entries are given as
.. math::
A_{ij} = N(\\Lambda_{ij})
:param bip_set: select row-nodes (``True``) or column-nodes (``False``)
:type bip_set: bool
:returns: array of observed :math:`\\Lambda`-motifs
:rtype: numpy.array
:raise NameError: raise an error if the parameter ``bip_set`` is
neither ``True`` nor ``False``
"""
if bip_set is False:
lambda_mat = np.dot(np.transpose(self.bin_mat), self.bin_mat)
assert lambda_mat.shape == (self.num_columns, self.num_columns)
elif bip_set is True:
lambda_mat = np.dot(self.bin_mat, np.transpose(self.bin_mat))
assert lambda_mat.shape == (self.num_rows, self.num_rows)
else:
errmsg = str(bip_set) + ' is not supported.'
raise NameError(errmsg)
# set diagonal to zero
# di = np.diag_indices(lambda_mat.shape[0], 2)
# lambda_mat[di] = 0
return lambda_mat[
|
np.triu_indices_from(lambda_mat, k=1)
|
numpy.triu_indices_from
|
#!/usr/bin/env python
'''
Prepare data from custom dataset for PointRCNN architecture
'''
import os
import numpy as np
import pandas as pd
import pickle
import torch
from torch.utils.data import Dataset
import lib.utils.custom_data_utils as data_utils
import lib.utils.object3d as object3d
import lib.utils.kitti_utils as kitti_utils
import lib.utils.roipool3d.roipool3d_utils as roipool3d_utils
from lib.config import cfg
# DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../data/custom_data/")
class CustomRCNNDataset(Dataset):
def __init__(self, root, num_points, split='train', mode='TRAIN',
random_select=True, logger=None, intensity_channel=True, rcnn_training_roi_dir=None,
rcnn_training_feature_dir=None, rcnn_eval_roi_dir=None, rcnn_eval_feature_dir=None,
gt_database_dir=None, single_test_input=False): # batch_size=10, normalize=False,
"""
:param root: directory path to the dataset
:param split: 'train' or 'test'
:param num_points: number of points to process for each pointcloud (needs to be the same)
:param normalize: whether include the normalized coords in features (default: False)
:param intensity_channel: whether to include the intensity value to xyz coordinates (default: True)
:param single_test_input: try the network with just one single input frame (default: False)
"""
self.root = os.path.join(root, 'custom_data')
self.split = split
self.logger = logger
self.num_points = num_points # TODO: define number of points to train with per frame
# self.batch_size = batch_size
# self.normalize = normalize
self.intensity_channel = intensity_channel
# self.shuffle = shuffle
self.classes = ('Pedestrian',)  # trailing comma keeps this a tuple, so num_class is 1
self.num_class = self.classes.__len__()
# load all data files
self.all_files = data_utils.get_data_files(os.path.join(self.root, 'full_data.txt'))
# for rcnn training
self.rcnn_training_bbox_list = []
self.rpn_feature_list = []
self.pos_bbox_list = []
self.neg_bbox_list = []
self.far_neg_bbox_list = []
self.rcnn_eval_roi_dir = rcnn_eval_roi_dir
self.rcnn_eval_feature_dir = rcnn_eval_feature_dir
self.rcnn_training_roi_dir = rcnn_training_roi_dir
self.rcnn_training_feature_dir = rcnn_training_feature_dir
assert mode in ['TRAIN', 'EVAL', 'TEST'], 'Invalid mode: %s' % mode
self.mode = mode
self.random_select = random_select
if not self.random_select:
self.logger.warning('random select is False')
# TODO: create batches
# Stage 1: Region Proposal Network (RPN)
# Stage 2: box proposal refinement subnetwork (RCNN)
if cfg.RPN.ENABLED:
# initialize ground truth database (needed for data augmentation)
if gt_database_dir is not None:
self.gt_database = pickle.load(open(gt_database_dir, 'rb'))
logger.info('Loaded gt_database(%d) from %s' % (len(self.gt_database), gt_database_dir))
# load samples to work with (depending on train/test/val mode)
if single_test_input: # this is for trying network architecture with single input frame
if self.mode == 'TRAIN':
self.split_dir = os.path.join(self.root, 'train_trial.txt')
elif self.mode == 'EVAL':
self.split_dir = os.path.join(self.root, 'test_trial.txt')
else:
self.split_dir = os.path.join(self.root, split + '.txt')
self.logger.info('Load samples from %s' % self.split_dir)
self.current_samples = data_utils.get_data_files(self.split_dir)
# Create Mapping from sample frames to frame ids
self.sample_id_list = [idx for idx in range(0, self.current_samples.__len__())]
self.num_sample = self.sample_id_list.__len__()
# self.num_sample = self.all_files.__len__()
self.logger.info('Done: total {}-samples {}'.format(self.split, len(self.current_samples)))
def get_lidar(self, index):
""" Returns lidar point data loaded from h5 file in form of (N,4).
Args:
frame (string): frame id
"""
frame = self.current_samples[index]
# print('++++++++ Frame {} +++++++++'.format(frame))
lidar_file = os.path.join(self.root, frame)
assert os.path.exists(lidar_file)
pts, _ = data_utils.load_h5(lidar_file)
return pts
def get_label(self, index):
""" Returns point labels for each point in lidar data loaded from h5 file in form of (N,1).
Args:
frame (string): frame id
"""
frame = self.current_samples[index]
lidar_file = os.path.join(self.root, frame)
assert os.path.exists(lidar_file)
_, labels = data_utils.load_h5(lidar_file)
return np.reshape(labels, (-1,1))
def get_bbox_label(self, index):
"""
Return bbox annotations per frame, defined as (N,7), i.e. (N x [x, y, z, h, w, l, ry])
Args:
frame (string): frame id
"""
frame = self.current_samples[index]
lidar_file = os.path.join(self.root, frame)
assert os.path.exists(lidar_file)
# point labels not used here, bboxes instead
_, _, bbox = data_utils.load_h5(lidar_file, bbox=True)
# transform single bbox annotation into a list for compatibility reasons (dataset can be extended with >1 bboxes per frame)
bbox_list = np.reshape(bbox, (1,-1))
bbox_obj_list = [object3d.Object3d(box, gt=True) for box in bbox_list]
return bbox_list
def __len__(self):
# TODO: validate this setting also for RCNN
return len(self.sample_id_list)
def __getitem__(self, index):
# return self.data, self.labels
if cfg.RPN.ENABLED:
return self.get_rpn_sample(index)
elif cfg.RCNN.ENABLED:
if self.mode == 'TRAIN':
if cfg.RCNN.ROI_SAMPLE_JIT:
return self.get_rcnn_sample_jit(index)
else:
return self.get_rcnn_training_sample_batch(index)
else:
return self.get_proposal_from_file(index)
else:
raise NotImplementedError
# ------------- RPN Functions --------------------
def get_rpn_sample(self, index):
""" Prepare input for region proposal network.
Args:
index (int): The index of the point cloud instance, i.e. the corresp. frame.
"""
sample_id = int(self.sample_id_list[index])
pts_lidar = self.get_lidar(sample_id)
labels = self.get_label(sample_id)
if self.intensity_channel:
pts_intensity = pts_lidar[:, 3]
# normalize intensity values by min, max possible values (0,255) & translate intensity to [-0.5, 0.5]
pts_intensity_norm = ((pts_intensity - 0) / (255 - 0)).reshape(-1,1) - 0.5
sample_info = {'sample_id': sample_id, 'random_select': self.random_select}
# generate inputs
pts_coor = pts_lidar[:,:3]
dist = np.linalg.norm(pts_lidar[:, 0:3], axis=1)
# dist = np.sqrt(np.sum(pts_coor**2, axis=1,keepdims=True))
# print(dist)
if self.mode == "TRAIN" or self.random_select:
if self.num_points < len(pts_lidar): # downsample points
# flag for near points
dist_flag = dist < 8.0 # initial value for cars was 40 -> choose smaller value for indoor setting
far_inds = np.where(dist_flag == 0)[0]
near_inds = np.where(dist_flag == 1)[0]
if self.num_points > len(far_inds):
near_inds_choice = np.random.choice(near_inds, self.num_points - len(far_inds), replace=False)
choice = np.concatenate((near_inds_choice, far_inds), axis=0) if len(far_inds) > 0 else near_inds_choice
else:
choice = np.arange(0, len(pts_lidar), dtype=np.int32)
choice = np.random.choice(choice, self.num_points, replace=False)
else:
choice = np.arange(0, len(pts_lidar), dtype=np.int32)
if self.num_points > len(pts_lidar): # upsample points by randomly doubling existent points
extra_choice = np.random.choice(choice, self.num_points - len(pts_lidar), replace=False)
choice = np.concatenate((choice, extra_choice), axis=0)
np.random.shuffle(choice)
pts_coor = pts_coor[choice,:]
pts_features = [pts_intensity_norm[choice,:]]
ret_pts_features = np.concatenate(pts_features, axis=1) if pts_features.__len__() > 1 else pts_features[0]
# prepare input
if cfg.RPN.USE_INTENSITY:
pts_input = np.concatenate((pts_coor, ret_pts_features), axis=1) # (N, C)
else:
pts_input = pts_coor
sample_info['pts_input'] = pts_input
sample_info['pts_rect'] = pts_input
sample_info['pts_features'] = pts_intensity_norm[choice,:]
# stop here if only testing
if self.mode == 'TEST':
return sample_info
# prepare 3d ground truth bounding boxes
gt_bbox_list = self.get_bbox_label(index)
gt_obj_list = [object3d.Object3d(box_annot, gt=True) for box_annot in gt_bbox_list]
gt_boxes3d = kitti_utils.objs_to_boxes3d_velodyne(gt_obj_list)
#TODO: data augmentation
# generate training labels
rpn_cls_label, rpn_reg_label = self.generate_rpn_training_labels(pts_coor, gt_boxes3d)
# rpn_cls_label = (labels[choice,:]).astype(np.float32)
sample_info['rpn_cls_label'] = rpn_cls_label # 0:background, 1: pedestrian
sample_info['rpn_reg_label'] = rpn_reg_label
sample_info['gt_boxes3d'] = gt_boxes3d
return sample_info
@staticmethod
def generate_rpn_training_labels(pts_coor, gt_boxes3d):
# bottom up 3d bbox regression from foreground points during training
cls_label = np.zeros((pts_coor.shape[0]), dtype=np.int32)
reg_label = np.zeros((pts_coor.shape[0], 7), dtype=np.float32) # dx, dy, dz, rz, h, w, l
gt_corners = kitti_utils.boxes3d_to_corners3d_velodyne(gt_boxes3d, rotate=True)
extend_gt_boxes3d = kitti_utils.enlarge_box3d(gt_boxes3d, extra_width=0.2)
extend_gt_corners = kitti_utils.boxes3d_to_corners3d_velodyne(extend_gt_boxes3d, rotate=True)
for k in range(gt_boxes3d.shape[0]):
box_corners = gt_corners[k]
fg_pt_flag = kitti_utils.in_hull(pts_coor, box_corners)
fg_pts_coor = pts_coor[fg_pt_flag]
cls_label[fg_pt_flag] = 1
# enlarge the bbox3d, ignore nearby points
extend_box_corners = extend_gt_corners[k]
fg_enlarge_flag = kitti_utils.in_hull(pts_coor, extend_box_corners)
ignore_flag = np.logical_xor(fg_pt_flag, fg_enlarge_flag)
cls_label[ignore_flag] = -1
# pixel offset of object center
center3d = gt_boxes3d[k][0:3].copy() # (x, y, z)
center3d[2] += gt_boxes3d[k][3] / 2
reg_label[fg_pt_flag, 0:3] = center3d - fg_pts_coor # Now z is the true center of 3d box
# size and angle encoding
reg_label[fg_pt_flag, 3] = gt_boxes3d[k][3] # h
reg_label[fg_pt_flag, 4] = gt_boxes3d[k][4] # w
reg_label[fg_pt_flag, 5] = gt_boxes3d[k][5] # l
reg_label[fg_pt_flag, 6] = gt_boxes3d[k][6] # rz
return cls_label, reg_label
def collate_batch(self, batch):
""" Merge list of samples to create mini-batch
Args:
batch ([type]): [description]
"""
# testing
if self.mode != 'TRAIN' and cfg.RCNN.ENABLED and not cfg.RPN.ENABLED:
assert batch.__len__() == 1
return batch[0]
batch_size = batch.__len__()
ans_dict = {}
for key in batch[0].keys():
if cfg.RPN.ENABLED and key == 'gt_boxes3d' or \
(cfg.RCNN.ENABLED and cfg.RCNN.ROI_SAMPLE_JIT and key in ['gt_boxes3d', 'roi_boxes3d']):
max_gt = 0
for k in range(batch_size):
max_gt = max(max_gt, batch[k][key].__len__())
batch_gt_boxes3d = np.zeros((batch_size, max_gt, 7), dtype=np.float32)
for i in range(batch_size):
batch_gt_boxes3d[i, :batch[i][key].__len__(), :] = batch[i][key]
ans_dict[key] = batch_gt_boxes3d
continue
if isinstance(batch[0][key], np.ndarray):
if batch_size == 1:
ans_dict[key] = batch[0][key][np.newaxis, ...]
else:
ans_dict[key] = np.concatenate([batch[k][key][np.newaxis, ...] for k in range(batch_size)], axis=0)
else:
ans_dict[key] = [batch[k][key] for k in range(batch_size)]
if isinstance(batch[0][key], int):
ans_dict[key] = np.array(ans_dict[key], dtype=np.int32)
elif isinstance(batch[0][key], float):
ans_dict[key] = np.array(ans_dict[key], dtype=np.float32)
return ans_dict
@staticmethod
def get_rpn_features(rpn_feature_dir, idx):
rpn_feature_file = os.path.join(rpn_feature_dir, '%06d.npy' % idx)
rpn_xyz_file = os.path.join(rpn_feature_dir, '%06d_xyz.npy' % idx)
rpn_intensity_file = os.path.join(rpn_feature_dir, '%06d_intensity.npy' % idx)
if cfg.RCNN.USE_SEG_SCORE:
rpn_seg_file = os.path.join(rpn_feature_dir, '%06d_rawscore.npy' % idx)
rpn_seg_score = np.load(rpn_seg_file).reshape(-1)
rpn_seg_score = torch.sigmoid(torch.from_numpy(rpn_seg_score)).numpy()
else:
rpn_seg_file = os.path.join(rpn_feature_dir, '%06d_seg.npy' % idx)
rpn_seg_score = np.load(rpn_seg_file).reshape(-1)
return np.load(rpn_xyz_file), np.load(rpn_feature_file), np.load(rpn_intensity_file).reshape(-1), rpn_seg_score
# ------------- RCNN Functions --------------------
def get_proposal_from_file(self, index):
"""
If proposals from first stage were saved to txt files, they can be directly loaded.
"""
sample_id = int(self.sample_id_list[index])
proposal_file = os.path.join(self.rcnn_eval_roi_dir, '%06d.txt' % sample_id)
# get detections from output file of stage 1
roi_obj_list = kitti_utils.get_objects_from_label(proposal_file)
rpn_xyz, rpn_features, rpn_intensity, seg_mask = self.get_rpn_features(self.rcnn_eval_feature_dir, sample_id)
pts_rect, pts_rpn_features, pts_intensity = rpn_xyz, rpn_features, rpn_intensity
roi_box3d_list, roi_scores = [], []
for obj in roi_obj_list:
box3d = np.array([obj.pos[0], obj.pos[1], obj.pos[2], obj.h, obj.w, obj.l, obj.ry], dtype=np.float32)
roi_box3d_list.append(box3d.reshape(1, 7))
roi_scores.append(obj.score)
roi_boxes3d = np.concatenate(roi_box3d_list, axis=0) # (N, 7)
roi_scores = np.array(roi_scores, dtype=np.float32) # (N)
if cfg.RCNN.ROI_SAMPLE_JIT:
sample_dict = {'sample_id': sample_id,
'rpn_xyz': rpn_xyz,
'rpn_features': rpn_features,
'seg_mask': seg_mask,
'roi_boxes3d': roi_boxes3d,
'roi_scores': roi_scores,
'pts_depth': np.linalg.norm(rpn_xyz, ord=2, axis=1)}
if self.mode != 'TEST':
gt_obj_list = self.get_bbox_label(sample_id)
gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)
roi_corners = kitti_utils.boxes3d_to_corners3d_velodyne(roi_boxes3d)
gt_corners = kitti_utils.boxes3d_to_corners3d_velodyne(gt_boxes3d)
iou3d = kitti_utils.get_iou3d_velodyne(roi_corners, gt_corners)
if gt_boxes3d.shape[0] > 0:
gt_iou = iou3d.max(axis=1)
else:
gt_iou = np.zeros(roi_boxes3d.shape[0]).astype(np.float32)
sample_dict['gt_boxes3d'] = gt_boxes3d
sample_dict['gt_iou'] = gt_iou
return sample_dict
if cfg.RCNN.USE_INTENSITY:
pts_extra_input_list = [pts_intensity.reshape(-1, 1), seg_mask.reshape(-1, 1)]
else:
pts_extra_input_list = [seg_mask.reshape(-1, 1)]
if cfg.RCNN.USE_DEPTH:
cur_depth = np.linalg.norm(pts_rect, axis=1, ord=2)
cur_depth_norm = (cur_depth / 20.0) - 0.5
pts_extra_input_list.append(cur_depth_norm.reshape(-1, 1))
pts_extra_input = np.concatenate(pts_extra_input_list, axis=1)
pts_input, pts_features = roipool3d_utils.roipool3d_cpu(roi_boxes3d, pts_rect, pts_rpn_features,
pts_extra_input, cfg.RCNN.POOL_EXTRA_WIDTH,
sampled_pt_num=cfg.RCNN.NUM_POINTS)
sample_dict = {'sample_id': sample_id,
'pts_input': pts_input,
'pts_features': pts_features,
'roi_boxes3d': roi_boxes3d,
'roi_scores': roi_scores,
'roi_size': roi_boxes3d[:, 3:6]}
if self.mode == 'TEST':
return sample_dict
gt_obj_list = self.get_bbox_label(sample_id)
gt_boxes3d = np.zeros((gt_obj_list.__len__(), 7), dtype=np.float32)
for k, obj in enumerate(gt_obj_list):
gt_boxes3d[k, 0:3], gt_boxes3d[k, 3], gt_boxes3d[k, 4], gt_boxes3d[k, 5], gt_boxes3d[k, 6] \
= obj.pos, obj.h, obj.w, obj.l, obj.ry
if gt_boxes3d.__len__() == 0:
gt_iou = np.zeros((roi_boxes3d.shape[0]), dtype=np.float32)
else:
roi_corners = kitti_utils.boxes3d_to_corners3d_velodyne(roi_boxes3d)
gt_corners = kitti_utils.boxes3d_to_corners3d_velodyne(gt_boxes3d)
iou3d = kitti_utils.get_iou3d_velodyne(roi_corners, gt_corners)
gt_iou = iou3d.max(axis=1)
sample_dict['gt_boxes3d'] = gt_boxes3d
sample_dict['gt_iou'] = gt_iou
return sample_dict
def get_rcnn_sample_info(self, roi_info):
sample_id, gt_box3d = roi_info['sample_id'], roi_info['gt_box3d']
rpn_xyz, rpn_features, rpn_intensity, seg_mask = self.rpn_feature_list[sample_id]
# augmentation original roi by adding noise
roi_box3d = self.aug_roi_by_noise(roi_info)
# point cloud pooling based on roi_box3d
pooled_boxes3d = kitti_utils.enlarge_box3d(roi_box3d.reshape(1, 7), cfg.RCNN.POOL_EXTRA_WIDTH)
# inside/outside test if point inside enlarged bbox
boxes_pts_mask_list = roipool3d_utils.pts_in_boxes3d_cpu(torch.from_numpy(rpn_xyz),
torch.from_numpy(pooled_boxes3d))
pt_mask_flag = (boxes_pts_mask_list[0].numpy() == 1)
cur_pts = rpn_xyz[pt_mask_flag].astype(np.float32)
# data augmentation
aug_pts = cur_pts.copy()
aug_gt_box3d = gt_box3d.copy().astype(np.float32)
aug_roi_box3d = roi_box3d.copy()
#TODO:
# if cfg.AUG_DATA and self.mode == 'TRAIN':
# # calculate alpha by ry
# temp_boxes3d = np.concatenate([aug_roi_box3d.reshape(1, 7), aug_gt_box3d.reshape(1, 7)], axis=0)
# temp_x, temp_y, temp_rz = temp_boxes3d[:, 0], temp_boxes3d[:, 1], temp_boxes3d[:, 6]
# temp_beta = np.arctan2(temp_y, temp_x).astype(np.float64)
# temp_alpha = -np.sign(temp_beta) * np.pi / 2 + temp_beta + temp_rz
# # data augmentation
# aug_pts, aug_boxes3d, aug_method = self.data_augmentation(aug_pts, temp_boxes3d, temp_alpha, mustaug=True, stage=2)
# aug_roi_box3d, aug_gt_box3d = aug_boxes3d[0], aug_boxes3d[1]
# aug_gt_box3d = aug_gt_box3d.astype(gt_box3d.dtype)
# Pool input points
valid_mask = 1 # whether the input is valid
if aug_pts.shape[0] == 0:
pts_features = np.zeros((1, 128), dtype=np.float32)
input_channel = 3 + int(cfg.RCNN.USE_INTENSITY) + int(cfg.RCNN.USE_MASK) + int(cfg.RCNN.USE_DEPTH)
pts_input = np.zeros((1, input_channel), dtype=np.float32)
valid_mask = 0
else:
pts_features = rpn_features[pt_mask_flag].astype(np.float32)
pts_intensity = rpn_intensity[pt_mask_flag].astype(np.float32)
pts_input_list = [aug_pts, pts_intensity.reshape(-1, 1)]
if cfg.RCNN.USE_INTENSITY:
pts_input_list = [aug_pts, pts_intensity.reshape(-1, 1)]
else:
pts_input_list = [aug_pts]
if cfg.RCNN.USE_MASK:
if cfg.RCNN.MASK_TYPE == 'seg':
pts_mask = seg_mask[pt_mask_flag].astype(np.float32)
elif cfg.RCNN.MASK_TYPE == 'roi':
pts_mask = roipool3d_utils.pts_in_boxes3d_cpu(torch.from_numpy(aug_pts),
torch.from_numpy(aug_roi_box3d.reshape(1, 7)))
pts_mask = (pts_mask[0].numpy() == 1).astype(np.float32)
else:
raise NotImplementedError
pts_input_list.append(pts_mask.reshape(-1, 1))
if cfg.RCNN.USE_DEPTH:
pts_depth = np.linalg.norm(aug_pts, axis=1, ord=2)
pts_depth_norm = (pts_depth / 20.0) - 0.5 # scale depth with max distance of 20
pts_input_list.append(pts_depth_norm.reshape(-1, 1))
pts_input = np.concatenate(pts_input_list, axis=1) # (N, C)
aug_gt_corners = kitti_utils.boxes3d_to_corners3d_velodyne(aug_gt_box3d.reshape(-1, 7))
aug_roi_corners = kitti_utils.boxes3d_to_corners3d_velodyne(aug_roi_box3d.reshape(-1, 7))
iou3d = kitti_utils.get_iou3d_velodyne(aug_roi_corners, aug_gt_corners)
cur_iou = iou3d[0][0]
# regression valid mask
reg_valid_mask = 1 if cur_iou >= cfg.RCNN.REG_FG_THRESH and valid_mask == 1 else 0
# classification label
cls_label = 1 if cur_iou > cfg.RCNN.CLS_FG_THRESH else 0
if cfg.RCNN.CLS_BG_THRESH < cur_iou < cfg.RCNN.CLS_FG_THRESH or valid_mask == 0:
cls_label = -1
# canonical transform and sampling
pts_input_ct, gt_box3d_ct = self.canonical_transform(pts_input, aug_roi_box3d, aug_gt_box3d)
pts_input_ct, pts_features = self.rcnn_input_sample(pts_input_ct, pts_features)
sample_info = {'sample_id': sample_id,
'pts_input': pts_input_ct,
'pts_features': pts_features,
'cls_label': cls_label,
'reg_valid_mask': reg_valid_mask,
'gt_boxes3d_ct': gt_box3d_ct,
'roi_boxes3d': aug_roi_box3d,
'roi_size': aug_roi_box3d[3:6],
'gt_boxes3d': aug_gt_box3d}
return sample_info
def get_rcnn_training_sample_batch(self, index):
sample_id = int(self.sample_id_list[index])
rpn_xyz, rpn_features, rpn_intensity, seg_mask = \
self.get_rpn_features(self.rcnn_training_feature_dir, sample_id)
# load rois and gt_boxes3d for this sample
roi_file = os.path.join(self.rcnn_training_roi_dir, '%06d.txt' % sample_id)
roi_obj_list = kitti_utils.get_objects_from_label(roi_file)
roi_boxes3d = kitti_utils.objs_to_boxes3d(roi_obj_list)
# roi_scores = kitti_utils.objs_to_scores(roi_obj_list)
gt_obj_list = self.get_bbox_label(sample_id)
gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)
# calculate original iou
iou3d = kitti_utils.get_iou3d_velodyne(kitti_utils.boxes3d_to_corners3d(roi_boxes3d),
kitti_utils.boxes3d_to_corners3d(gt_boxes3d))
max_overlaps, gt_assignment = iou3d.max(axis=1), iou3d.argmax(axis=1)
max_iou_of_gt, roi_assignment = iou3d.max(axis=0), iou3d.argmax(axis=0)
roi_assignment = roi_assignment[max_iou_of_gt > 0].reshape(-1)
# sample fg, easy_bg, hard_bg
fg_rois_per_image = int(np.round(cfg.RCNN.FG_RATIO * cfg.RCNN.ROI_PER_IMAGE))
fg_thresh = min(cfg.RCNN.REG_FG_THRESH, cfg.RCNN.CLS_FG_THRESH)
fg_inds = np.nonzero(max_overlaps >= fg_thresh)[0]
fg_inds = np.concatenate((fg_inds, roi_assignment), axis=0) # consider the roi which has max_overlaps with gt as fg
easy_bg_inds = np.nonzero((max_overlaps < cfg.RCNN.CLS_BG_THRESH_LO))[0]
hard_bg_inds = np.nonzero((max_overlaps < cfg.RCNN.CLS_BG_THRESH) &
(max_overlaps >= cfg.RCNN.CLS_BG_THRESH_LO))[0]
fg_num_rois = fg_inds.size
bg_num_rois = hard_bg_inds.size + easy_bg_inds.size
if fg_num_rois > 0 and bg_num_rois > 0:
# sampling fg
fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
rand_num = np.random.permutation(fg_num_rois)
fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]
# sampling bg
bg_rois_per_this_image = cfg.RCNN.ROI_PER_IMAGE - fg_rois_per_this_image
bg_inds = self.sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_image)
elif fg_num_rois > 0 and bg_num_rois == 0:
# sampling fg
rand_num = np.floor(np.random.rand(cfg.RCNN.ROI_PER_IMAGE) * fg_num_rois).astype(np.int64)
fg_inds = fg_inds[rand_num]
fg_rois_per_this_image = cfg.RCNN.ROI_PER_IMAGE
bg_rois_per_this_image = 0
elif bg_num_rois > 0 and fg_num_rois == 0:
# sampling bg
bg_rois_per_this_image = cfg.RCNN.ROI_PER_IMAGE
bg_inds = self.sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_image)
fg_rois_per_this_image = 0
else:
import pdb
pdb.set_trace()
raise NotImplementedError
# augment the rois by noise
roi_list, roi_iou_list, roi_gt_list = [], [], []
if fg_rois_per_this_image > 0:
fg_rois_src = roi_boxes3d[fg_inds].copy()
gt_of_fg_rois = gt_boxes3d[gt_assignment[fg_inds]]
fg_rois, fg_iou3d = self.aug_roi_by_noise_batch(fg_rois_src, gt_of_fg_rois, aug_times=10)
roi_list.append(fg_rois)
roi_iou_list.append(fg_iou3d)
roi_gt_list.append(gt_of_fg_rois)
if bg_rois_per_this_image > 0:
bg_rois_src = roi_boxes3d[bg_inds].copy()
gt_of_bg_rois = gt_boxes3d[gt_assignment[bg_inds]]
bg_rois, bg_iou3d = self.aug_roi_by_noise_batch(bg_rois_src, gt_of_bg_rois, aug_times=1)
roi_list.append(bg_rois)
roi_iou_list.append(bg_iou3d)
roi_gt_list.append(gt_of_bg_rois)
rois = np.concatenate(roi_list, axis=0)
iou_of_rois = np.concatenate(roi_iou_list, axis=0)
gt_of_rois = np.concatenate(roi_gt_list, axis=0)
# collect extra features for point cloud pooling
if cfg.RCNN.USE_INTENSITY:
pts_extra_input_list = [rpn_intensity.reshape(-1, 1), seg_mask.reshape(-1, 1)]
else:
pts_extra_input_list = [seg_mask.reshape(-1, 1)]
if cfg.RCNN.USE_DEPTH:
pts_depth = (np.linalg.norm(rpn_xyz, ord=2, axis=1) / 70.0) - 0.5
pts_extra_input_list.append(pts_depth.reshape(-1, 1))
pts_extra_input = np.concatenate(pts_extra_input_list, axis=1)
pts_input, pts_features, pts_empty_flag = roipool3d_utils.roipool3d_cpu(rois, rpn_xyz, rpn_features,
pts_extra_input,
cfg.RCNN.POOL_EXTRA_WIDTH,
sampled_pt_num=cfg.RCNN.NUM_POINTS,
canonical_transform=False)
# data augmentation
if cfg.AUG_DATA and self.mode == 'TRAIN':
for k in range(rois.__len__()):
aug_pts = pts_input[k, :, 0:3].copy()
aug_gt_box3d = gt_of_rois[k].copy()
aug_roi_box3d = rois[k].copy()
# calculate alpha by ry
temp_boxes3d = np.concatenate([aug_roi_box3d.reshape(1, 7), aug_gt_box3d.reshape(1, 7)], axis=0)
temp_x, temp_z, temp_ry = temp_boxes3d[:, 0], temp_boxes3d[:, 2], temp_boxes3d[:, 6]
temp_beta = np.arctan2(temp_z, temp_x).astype(np.float64)
temp_alpha = -np.sign(temp_beta) * np.pi / 2 + temp_beta + temp_ry
# data augmentation
aug_pts, aug_boxes3d, aug_method = self.data_augmentation(aug_pts, temp_boxes3d, temp_alpha,
mustaug=True, stage=2)
# assign to original data
pts_input[k, :, 0:3] = aug_pts
rois[k] = aug_boxes3d[0]
gt_of_rois[k] = aug_boxes3d[1]
valid_mask = (pts_empty_flag == 0).astype(np.int32)
# regression valid mask
reg_valid_mask = (iou_of_rois > cfg.RCNN.REG_FG_THRESH).astype(np.int32) & valid_mask
# classification label
cls_label = (iou_of_rois > cfg.RCNN.CLS_FG_THRESH).astype(np.int32)
invalid_mask = (iou_of_rois > cfg.RCNN.CLS_BG_THRESH) & (iou_of_rois < cfg.RCNN.CLS_FG_THRESH)
cls_label[invalid_mask] = -1
cls_label[valid_mask == 0] = -1
# canonical transform and sampling
pts_input_ct, gt_boxes3d_ct = self.canonical_transform_batch(pts_input, rois, gt_of_rois)
pts_features = np.concatenate((pts_input_ct[:,:,3:],pts_features), axis=2)
pts_input_ct = pts_input_ct[:,:,0:3]
sample_info = {'sample_id': sample_id,
'pts_input': pts_input_ct,
'pts_features': pts_features,
'cls_label': cls_label,
'reg_valid_mask': reg_valid_mask,
'gt_boxes3d_ct': gt_boxes3d_ct,
'roi_boxes3d': rois,
'roi_size': rois[:, 3:6],
'gt_boxes3d': gt_of_rois}
return sample_info
@staticmethod
def rcnn_input_sample(pts_input, pts_features):
choice = np.random.choice(pts_input.shape[0], cfg.RCNN.NUM_POINTS, replace=True)
if pts_input.shape[0] < cfg.RCNN.NUM_POINTS:
choice[:pts_input.shape[0]] = np.arange(pts_input.shape[0])
np.random.shuffle(choice)
pts_input = pts_input[choice]
pts_features = pts_features[choice]
return pts_input, pts_features
def aug_roi_by_noise(self, roi_info):
"""
add noise to original roi to get aug_box3d
:param roi_info:
:return:
"""
roi_box3d, gt_box3d = roi_info['roi_box3d'], roi_info['gt_box3d']
original_iou = roi_info['iou3d']
temp_iou = cnt = 0
pos_thresh = min(cfg.RCNN.REG_FG_THRESH, cfg.RCNN.CLS_FG_THRESH)
gt_corners = kitti_utils.boxes3d_to_corners3d_velodyne(gt_box3d.reshape(-1, 7))
aug_box3d = roi_box3d
while temp_iou < pos_thresh and cnt < 10:
if roi_info['type'] == 'gt':
aug_box3d = self.random_aug_box3d(roi_box3d) # GT, must random
else:
if np.random.rand() < 0.2:
aug_box3d = roi_box3d # p=0.2 to keep the original roi box
else:
aug_box3d = self.random_aug_box3d(roi_box3d)
aug_corners = kitti_utils.boxes3d_to_corners3d_velodyne(aug_box3d.reshape(-1, 7))
iou3d = kitti_utils.get_iou3d_velodyne(aug_corners, gt_corners)
temp_iou = iou3d[0][0]
cnt += 1
if original_iou < pos_thresh: # original bg, break
break
return aug_box3d
@staticmethod
def random_aug_box3d(box3d):
"""
:param box3d: (7) [x, y, z, h, w, l, rz]
random shift, scale, orientation
"""
if cfg.RCNN.REG_AUG_METHOD == 'single':
pos_shift = (np.random.rand(3) - 0.5) # [-0.5 ~ 0.5]
hwl_scale = (np.random.rand(3) - 0.5) / (0.5 / 0.15) + 1.0 #
angle_rot = (np.random.rand(1) - 0.5) / (0.5 / (np.pi / 12)) # [-pi/12 ~ pi/12]
aug_box3d = np.concatenate([box3d[0:3] + pos_shift, box3d[3:6] * hwl_scale,
box3d[6:7] + angle_rot])
return aug_box3d
elif cfg.RCNN.REG_AUG_METHOD == 'multiple':
# pos_range, hwl_range, angle_range, mean_iou
range_config = [[0.2, 0.1, np.pi / 12, 0.7],
[0.3, 0.15, np.pi / 12, 0.6],
[0.5, 0.15, np.pi / 9, 0.5],
[0.8, 0.15, np.pi / 6, 0.3],
[1.0, 0.15, np.pi / 3, 0.2]]
idx = np.random.randint(len(range_config))
pos_shift = ((np.random.rand(3) - 0.5) / 0.5) * range_config[idx][0]
hwl_scale = ((np.random.rand(3) - 0.5) / 0.5) * range_config[idx][1] + 1.0
angle_rot = ((np.random.rand(1) - 0.5) / 0.5) * range_config[idx][2]
aug_box3d = np.concatenate([box3d[0:3] + pos_shift, box3d[3:6] * hwl_scale, box3d[6:7] + angle_rot])
return aug_box3d
elif cfg.RCNN.REG_AUG_METHOD == 'normal':
x_shift = np.random.normal(loc=0, scale=0.3)
y_shift = np.random.normal(loc=0, scale=0.3)
z_shift = np.random.normal(loc=0, scale=0.2)
h_shift = np.random.normal(loc=0, scale=0.25)
w_shift = np.random.normal(loc=0, scale=0.5)
l_shift = np.random.normal(loc=0, scale=0.15)
rz_shift = ((np.random.rand() - 0.5) / 0.5) * np.pi / 12
aug_box3d = np.array([box3d[0] + x_shift, box3d[1] + y_shift, box3d[2] + z_shift, box3d[3] + h_shift,
box3d[4] + w_shift, box3d[5] + l_shift, box3d[6] + rz_shift])
return aug_box3d
else:
raise NotImplementedError
@staticmethod
def canonical_transform(pts_input, roi_box3d, gt_box3d):
roi_rz = roi_box3d[6] % (2 * np.pi) # 0 ~ 2pi
roi_center = roi_box3d[0:3]
# shift to center
pts_input[:, [0, 1, 2]] = pts_input[:, [0, 1, 2]] - roi_center
gt_box3d_ct = np.copy(gt_box3d)
gt_box3d_ct[0:3] = gt_box3d_ct[0:3] - roi_center
# rotate to the direction of head
gt_box3d_ct = kitti_utils.rotate_pc_along_z(gt_box3d_ct.reshape(1, 7), roi_rz).reshape(7)
gt_box3d_ct[6] = gt_box3d_ct[6] - roi_rz
pts_input = kitti_utils.rotate_pc_along_z(pts_input, roi_rz)
return pts_input, gt_box3d_ct
@staticmethod
def canonical_transform_batch(pts_input, roi_boxes3d, gt_boxes3d):
"""
:param pts_input: (N, npoints, 3 + C)
:param roi_boxes3d: (N, 7)
:param gt_boxes3d: (N, 7)
:return:
"""
roi_rz = roi_boxes3d[:, 6] % (2 * np.pi) # 0 ~ 2pi
roi_center = roi_boxes3d[:, 0:3]
# shift to center
pts_input[:, :, [0, 1, 2]] = pts_input[:, :, [0, 1, 2]] - roi_center.reshape(-1, 1, 3)
gt_boxes3d_ct = np.copy(gt_boxes3d)
gt_boxes3d_ct[:, 0:3] = gt_boxes3d_ct[:, 0:3] - roi_center
# rotate to the direction of head
gt_boxes3d_ct = kitti_utils.rotate_pc_along_z(torch.from_numpy(gt_boxes3d_ct.reshape(-1, 1, 7)).float(),
torch.from_numpy(roi_rz).float()).numpy().reshape(-1, 7)
gt_boxes3d_ct[:, 6] = gt_boxes3d_ct[:, 6] - roi_rz
pts_input = kitti_utils.rotate_pc_along_z(torch.from_numpy(pts_input).float(),
torch.from_numpy(roi_rz).float()).numpy()
return pts_input, gt_boxes3d_ct
def data_augmentation(self, aug_pts_rect, aug_gt_boxes3d, gt_alpha, sample_id=None, mustaug=False, stage=1):
"""
:param aug_pts_rect: (N, 3)
:param aug_gt_boxes3d: (N, 7)
:param gt_alpha: (N)
:return:
"""
aug_list = cfg.AUG_METHOD_LIST
aug_enable = 1 -
|
np.random.rand(3)
|
numpy.random.rand
|
import tensorflow as tf
import numpy as np
from tensorflow.keras import Model
class IDEncoder(Model):
def __init__(self, args, model_path, intermediate_layers_names=None):
super().__init__()
self.args = args
self.mean = (91.4953, 103.8827, 131.0912)
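# NOTE (assumption): crop_faces() below relies on self.mtcnn, i.e. a face
# detector exposing detect_faces(); it is presumably initialized elsewhere
# (e.g. by a subclass or a later assignment), since it is not set here.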
base_model = tf.keras.models.load_model(model_path)
if intermediate_layers_names:
outputs = [base_model.get_layer(name).output for name in intermediate_layers_names]
else:
outputs = []
# Add output of the network in any case
outputs.append(base_model.layers[-2].output)
self.model = tf.keras.Model(base_model.inputs, outputs)
def crop_faces(self, img):
ps = []
for i in range(img.shape[0]):
oneimg = img[i]
try:
box = tf.numpy_function(self.mtcnn.detect_faces, [oneimg], np.uint8)
box = [z.numpy() for z in box[:4]]
x1, y1, w, h = box
x_expand = w * 0.3
y_expand = h * 0.3
x1 = int(np.maximum(x1 - x_expand // 2, 0))
y1 = int(
|
np.maximum(y1 - y_expand // 2, 0)
|
numpy.maximum
|
"""This Module contains basic Multi-Armed Bandit Algorithms."""
import random
from abc import ABC, abstractmethod
import numpy as np
class MABInterface(ABC):
"""Abstract base class for various Multi-Armed Bandit Algorithms."""
@abstractmethod
def select_arm(self) -> None:
"""Decide which arm should be selected."""
pass
@abstractmethod
def update(self) -> None:
"""Update the information about the arms."""
pass
@abstractmethod
def batch_update(self) -> None:
"""Update the information about the arms."""
pass
class EpsilonGreedy(MABInterface):
"""Epsilon Greedy Algorithm for Multi-Armed Bandit problems."""
def __init__(self, epsilon: float, n_arms: int, batch_size: int=None) -> None:
"""Initialize class.
:param epsilon: the hyper-parameter which represents how often the algorithm explores.
:param n_arms: the number of given arms.
:param batch_size: the number of reward observations gathered before each batch update.
"""
self.epsilon = epsilon
self.n_arms = n_arms
self.counts = np.zeros(self.n_arms, dtype=int)
self.values = np.zeros(self.n_arms)
self._values = np.zeros(self.n_arms)
self.batch_size = batch_size
self.data_size = 0
def select_arm(self) -> int:
"""Decide which arm should be selected.
:return: index of the selected arm.
"""
result = random.randrange(self.values.shape[0])
if np.random.rand() > self.epsilon:
result = np.argmax(self.values)
return result
def update(self, chosen_arm: int, reward: float) -> None:
"""Update the information about the arms.
:param chosen_arm: index of the chosen arm.
:param reward: reward from the chosen arm.
"""
self.counts[chosen_arm] += 1
n = self.counts[chosen_arm]
value = self.values[chosen_arm]
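# incremental running-mean update: new_value = value + (reward - value) / n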
new_value = (value * (n - 1) / n) + reward / n
self.values[chosen_arm] = new_value
def batch_update(self, chosen_arm: int, reward: float) -> None:
"""Update the information about the arms with a new batch of data.
:param chosen_arm: index of the chosen arm.
:param reward: reward from the chosen arm.
"""
self.data_size += 1
self.counts[chosen_arm] += 1
n = self.counts[chosen_arm]
value = self._values[chosen_arm]
new_value = (value * (n - 1) / n) + reward / n
self._values[chosen_arm] = new_value
if self.data_size % self.batch_size == 0:
self.values = np.copy(self._values)
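# Minimal usage sketch (illustrative, with hypothetical Bernoulli reward
# probabilities; not part of the original module):
#
#   agent = EpsilonGreedy(epsilon=0.1, n_arms=3)
#   true_probs = [0.2, 0.5, 0.7]
#   for _ in range(1000):
#       arm = agent.select_arm()
#       reward = float(np.random.rand() < true_probs[arm])
#       agent.update(arm, reward)
#   best_arm = int(np.argmax(agent.values))  # should usually be arm 2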
class SoftMax(MABInterface):
"""SoftMax Algorithm for Multi-Armed Bandit problems."""
def __init__(self, temperature: float, n_arms: int, batch_size: int=None) -> None:
"""Initialize class.
:param temperature: the hyper-parameter which represents how much the algorithm uses explored information about arms.
:param n_arms: the number of given arms.
:param batch_size: the number of reward observations gathered before each batch update.
"""
self.temperature = temperature
self.n_arms = n_arms
self.counts = np.zeros(self.n_arms, dtype=int)
self.values = np.zeros(self.n_arms)
self._values = np.zeros(self.n_arms)
self.batch_size = batch_size
self.data_size = 0
def select_arm(self) -> int:
"""Decide which arm should be selected.
:return: index of the selected arm.
"""
z = np.sum(np.exp(self.values / self.temperature))
probs = np.exp(self.values / self.temperature) / z
return np.random.choice(self.counts.shape[0], p=probs)
def update(self, chosen_arm: int, reward: float) -> None:
"""Update the information about the arms.
:param chosen_arm: index of the chosen arm.
:param reward: reward from the chosen arm.
"""
self.counts[chosen_arm] += 1
n = self.counts[chosen_arm]
new_value = ((n - 1) / n) * self.values[chosen_arm] + (1 / n) * reward
self.values[chosen_arm] = new_value
def batch_update(self, chosen_arm: int, reward: float) -> None:
"""Update the information about the arms with a new batch of data.
:param chosen_arm: index of the chosen arm.
:param reward: reward from the chosen arm.
"""
self.data_size += 1
self.counts[chosen_arm] += 1
n = self.counts[chosen_arm]
value = self._values[chosen_arm]
new_value = (value * (n - 1) / n) + reward / n
self._values[chosen_arm] = new_value
if self.data_size % self.batch_size == 0:
self.values = np.copy(self._values)
class UCB1(MABInterface):
"""Upper Confidence Bound1 Algorithm for Multi-Armed Bandit problems with rewards provided from gaussian distributions."""
def __init__(self, n_arms: int, batch_size: int=None) -> None:
"""Initialize class.
:param n_arms: the number of given arms.
:param batch_size: the number of reward observations gathered before each batch update.
"""
self.n_arms = n_arms
self.counts = np.zeros(self.n_arms, dtype=int)
self.values = np.zeros(self.n_arms)
self._values = np.zeros(self.n_arms)
self.batch_size = batch_size
self.data_size = 0
def select_arm(self) -> int:
"""Decide which arm should be selected.
:return: index of the selected arm.
"""
if 0 in self.counts:
result = np.where(self.counts == 0)[0][0]
else:
ucb_values = np.zeros(self.n_arms)
total_counts = sum(self.counts)
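# UCB1 exploration bonus (Hoeffding-based): sqrt(2 * ln(t) / n_j) for each arm j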
bounds = np.sqrt(2 * np.log(total_counts) / self.counts)
ucb_values = self.values + bounds
result = np.argmax(ucb_values)
return result
def update(self, chosen_arm: int, reward: float) -> None:
"""Update the information about the arms.
:param chosen_arm: index of the chosen arm.
:param reward: reward from the chosen arm.
"""
self.counts[chosen_arm] += 1
n = self.counts[chosen_arm]
new_value = ((n - 1) / n) * self.values[chosen_arm] + (1 / n) * reward
self.values[chosen_arm] = new_value
def batch_update(self, chosen_arm: int, reward: float) -> None:
"""Update the information about the arms with a new batch of data.
:param chosen_arm: index of the chosen arm.
:param reward: reward from the chosen arm.
"""
self.data_size += 1
self.counts[chosen_arm] += 1
n = self.counts[chosen_arm]
value = self._values[chosen_arm]
new_value = (value * (n - 1) / n) + reward / n
self._values[chosen_arm] = new_value
if self.data_size % self.batch_size == 0:
self.values = np.copy(self._values)
class UCBTuned(MABInterface):
"""Upper Confidence Bound1 Algorithm for Multi-Armed Bandit problems with rewards provided from bernouill distributions."""
def __init__(self, n_arms: int, batch_size: int=None) -> None:
"""Initialize class.
:param n_arms: the number of given arms.
:param batch_size: the number of reward observations gathered before each batch update.
"""
self.n_arms = n_arms
self.counts = np.zeros(self.n_arms, dtype=int)
self.values = np.zeros(self.n_arms, dtype=float)
self.sigma = np.zeros(self.n_arms, dtype=float)
self._values = np.zeros(self.n_arms, dtype=float)
self._sigma = np.zeros(self.n_arms, dtype=float)
self.batch_size = batch_size
self.data_size = 0
def select_arm(self) -> int:
"""Decide which arm should be selected.
:return: index of the selected arm.
"""
if 0 in self.counts:
result = np.where(self.counts == 0)[0][0]
else:
ucb_values = np.zeros(self.n_arms)
total_counts = sum(self.counts)
bounds1 = np.log(total_counts) / self.counts
bounds2 = np.minimum(1 / 4, self.sigma + np.sqrt(2 * np.log(total_counts) / self.counts))
ucb_values = self.values + np.sqrt(bounds1 * bounds2)
result = np.argmax(ucb_values)
return result
def update(self, chosen_arm: int, reward: float) -> None:
"""Update the information about the arms.
:param chosen_arm: index of the chosen arm.
:param reward: reward from the chosen arm.
"""
self.counts[chosen_arm] += 1
n = self.counts[chosen_arm]
new_value = ((n - 1) / n) * self.values[chosen_arm] + (1 / n) * reward
new_sigma = ((n * ((self.sigma[chosen_arm] ** 2) + (self.values[chosen_arm] ** 2)) + reward ** 2) / (n + 1)) - new_value ** 2
self.values[chosen_arm] = new_value
self.sigma[chosen_arm] = new_sigma
def batch_update(self, chosen_arm: int, reward: float) -> None:
"""Update the information about the arms with a new batch of data.
:param chosen_arm: index of the chosen arm.
:param reward: reward from the chosen arm.
"""
self.data_size += 1
self.counts[chosen_arm] += 1
n = self.counts[chosen_arm]
new_value = ((n - 1) / n) * self._values[chosen_arm] + (1 / n) * reward
new_sigma = ((n * ((self._sigma[chosen_arm] ** 2) + (self._values[chosen_arm] ** 2)) + reward ** 2) / (n + 1)) - new_value ** 2
self._values[chosen_arm] = new_value
self._sigma[chosen_arm] = new_sigma
if self.data_size % self.batch_size == 0:
self.values = np.copy(self._values)
self.sigma =
|
np.copy(self._sigma)
|
numpy.copy
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import numpy as np
# from .constants import *
'''
Functions to deal with wind observations.
'''
def circular_mean(angles):
"""
Compute the arithmetic circular mean, not ignoring NaNs.
Parameters
----------
angles : list or array
The angles for averaging in radians.
Returns
-------
mean : float
The circular mean in radians.
"""
if np.any(np.isnan(angles)):
return np.nan
else:
return nan_circular_mean(angles)
def nan_circular_mean(angles):
"""
Compute the arithmetic circular mean, ignoring NaNs.
Parameters
----------
angles : list or array
The angles for averaging in radians.
Returns
-------
mean : float
The circular mean in radians.
"""
x = np.nansum(np.cos(angles))
y = np.nansum(np.sin(angles))
mean = np.arctan2(y, x)
if mean < 0:
mean = mean + (np.pi*2)
return mean
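# Example (illustrative): for angles of 350 and 10 degrees,
# nan_circular_mean(np.deg2rad([350, 10])) returns ~0.0 rad, whereas a
# plain arithmetic mean would incorrectly give 180 degrees.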
def circular_mean_deg(angles):
"""
Compute the arithmetic circular mean, not ignoring NaNs.
Parameters
----------
angles : list or array
The angles for averaging in degrees.
Returns
-------
mean : float
The circular mean in degrees.
"""
if np.any(
|
np.isnan(angles)
|
numpy.isnan
|
import pytest
import numpy as np
from bbox import BBox2D, BBox2DList
from bbox.box_modes import XYXY, XYWH
class TestBBox2DList(object):
@classmethod
def setup_class(cls):
cls.n = 10
cls.l = [BBox2D(np.random.randint(0, 1024, size=4))
for _ in range(cls.n)]
cls.bbl = BBox2DList(cls.l)
def test_null(self):
bbl = BBox2DList([])
assert bbl.shape == (0, 4)
def test_len(self):
assert len(self.bbl) == self.n
def test_init(self):
bbl = BBox2DList(self.bbl)
assert np.array_equal(bbl.numpy(), self.bbl.numpy())
def test_init_invalid(self):
with pytest.raises(TypeError):
BBox2DList("1, 2, 3, 4")
def test_init_invalid_element_type(self):
with pytest.raises(TypeError):
BBox2DList(["1, 2, 3, 4", [1, 2, 3, 4]])
def test_init_empty_ndarray(self):
bbl = BBox2DList(np.empty((0, 4)))
assert bbl.bboxes.shape == (0, 4)
def test_init_vector(self):
bbl = BBox2DList(np.asarray([0, 1, 2, 4]))
assert bbl.bboxes.shape == (1, 4)
def test_init_invalid_dims(self):
with pytest.raises(ValueError):
BBox2DList(
|
np.random.rand(10, 3)
|
numpy.random.rand
|
import numpy as np
from lqr_control import control
from VFA_Net import NeuralNetwork
nn_arq = [
{"input_dim": 1, "output_dim": 64, "activation": "quadratic"},
{"input_dim": 64, "output_dim": 1, "activation": "none"},
]
model = NeuralNetwork(nn_arq, bias = False, double = "yes")
A = np.array(1).reshape(1,1)
B = np.array(1).reshape(1,1)
Q = np.array(1).reshape(1,1)
R1 = np.array(1).reshape(1,1)
R2 = np.array(1.2).reshape(1,1)
x0 = np.array(-1).reshape(1,1)
u0 = np.array(0).reshape(1,1)
# number of time steps to simulate
T = 30
# number of iterations of the dynamical systems for training
NUM_TRIALS = 1000
ALPHA = 100
GAMMA = 0.9
K_1, _, _ = control.dlqr(A,B,Q,R1)
x_1, u_1 = control.simulate_discrete(A,B,K_1,x0,u0,T)
K_2, _, _ = control.dlqr(A,B,Q,R2)
x_2, u_2 = control.simulate_discrete(A,B,K_2,x0,u0,T)
def loss(target, prediction, alpha=1):
return float((1/(alpha**2))*np.square(target-alpha*prediction))
def train(K):
loss_history = []
for i in range(NUM_TRIALS):
x = np.random.randn(1).reshape(1,1)
#print('yhat = '+str(y_hat))
total_loss = 0
for t in range(T):
u = -np.matmul(K,x)
r = np.matmul(x,np.matmul(Q,x)) + np.matmul(u,np.matmul(R1,u))
y = r + ALPHA*GAMMA*model(np.matmul(A,x) + np.matmul(B,u))
y_hat = model.net_forward(x)
lr = 0.001
total_loss += loss(y, y_hat, ALPHA)
model.net_backward(y, y_hat, ALPHA)
model.update_wb(lr)
x = np.matmul(A,x) + np.matmul(B,u)
#output
if (i+1)%(NUM_TRIALS/10) == 0 or i == 0:
print('trial {}/{}'.format(i+1,NUM_TRIALS))
print("y = "+str(model(np.array(1).reshape(1,1))))
print("u = "+str(u))
print("r = "+str(r))
print("x+= "+str(np.matmul(A,x) + np.matmul(B,u)))
loss_history.append(total_loss/T)
return loss_history
print("y_0 = "+str(model(
|
np.array(1)
|
numpy.array
|
"""Image reconstruction from raw PET data"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2018"
#------------------------------------------------------------------------------
import numpy as np
import random
import sys
import os
import scipy.ndimage as ndi
from collections import namedtuple
import logging
from tqdm.auto import trange
import petprj
from niftypet.nipet.img import mmrimg
from niftypet.nipet import mmrnorm
from niftypet.nipet import mmraux
from niftypet import nimpa
# for isotope info
import resources
#reconstruction mode:
# 0 - no attenuation and no scatter
# 1 - attenuation and no scatter
# 2 - attenuation and scatter given as input parameter
# 3 - attenuation and scatter
recModeStr = ['_noatt_nosct_', '_nosct_', '_noatt_', '_', '_ute_']
# fwhm in [mm]
def fwhm2sig(fwhm, Cnt):
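# sigma = FWHM / (2*sqrt(2*ln 2)); the factor 0.1 presumably converts the
# FWHM from [mm] to the units of the voxel size Cnt['SZ_VOXY'] (assumption)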
return (0.1*fwhm/Cnt['SZ_VOXY']) / (2*(2*np.log(2))**.5)
#=========================================================================
# OSEM RECON
#-------------------------------------------------------------------------
def get_subsets14(n, params):
'''Define the n-th subset out of 14 in the transaxial projection space
'''
Cnt = params['Cnt']
txLUT = params['txLUT']
# just for check of sums (have to be equal for all subsets to make them balanced)
aisum = np.sum(txLUT['msino'], axis=0)
# number of subsets
N = 14
# projections per subset
P = Cnt['NSANGLES']//N
# the remaining projections which have to be spread over the N subsets with a given frequency
fs = N/float(P-N)
# generate sampling pattern for subsets up to N out of P
sp = np.array([np.arange(i,Cnt['NSANGLES'],P) for i in range(N)])
# ======================================
S = np.zeros((N,P),dtype=np.int16)
# ======================================
# sum of sino angle projections
totsum = np.zeros(N, dtype=np.int32)
# iterate subset (which is also the angle iterator within block b)
for s in range(N):
# list of sino angular indexes for a given subset
si = []
#::::: iterate sino blocks. This bit may be unnecessary, it can be taken directly from sp array
for b in range(N):
#--angle index within a sino block depending on subset s
ai = (s+b)%N
#--angle index for whole sino
sai = sp[ai, b]
si.append(sai)
totsum[s] += aisum[sai]
#:::::
# deal with the remaining part, ie, P-N per block
rai = np.int16( np.floor( np.arange(s,2*N,fs)[:4]%N ) )
for i in range(P-N):
sai = sp[-1,rai[i]]+i+1
totsum[s] += aisum[sai]
si.append(sai)
# print si
S[s] = np.array((si))
# get the projection bin index for transaxial gpu sinos
tmsk = txLUT['msino']>0
Smsk = -1*np.ones(tmsk.shape, dtype=np.int32)
Smsk[tmsk] = range(Cnt['Naw'])
iprj = Smsk[:,S[n]]
iprj = iprj[iprj>=0]
# n=0; plot(S[n,:-4],ones(14), '*'); plot(S[n,-4:],ones(4), 'o')
# Smsk = -1*np.ones(tmsk.shape, dtype=np.int32)
# q=-1*ones(Cnt['Naw'])
# q[iprj] = 3
# Smsk[tmsk] = q
return iprj, S
#---------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------#
#=== OSEM image reconstruction with several modes (with/without scatter and/or attenuation correction) ===#
def osemone(datain, mumaps, hst, scanner_params,
recmod=3, itr=4, fwhm=0., mask_radius=29.,
sctsino=np.array([]),
outpath='',
store_img=False, frmno='', fcomment='',
store_itr=[],
emmskS=False,
ret_sinos=False,
attnsino = None,
randsino = None,
normcomp = None):
log = logging.getLogger(__name__)
#---------- sort out OUTPUT ------------
#-output file name for the reconstructed image, initially assume n/a
fout = 'n/a'
if store_img or store_itr:
if outpath=='':
opth = os.path.join( datain['corepath'], 'reconstructed' )
else:
opth = outpath
mmraux.create_dir(opth)
if ret_sinos:
return_ssrb = True
return_mask = True
else:
return_ssrb = False
return_mask = False
#----------
# Get particular scanner parameters: Constants, transaxial and axial LUTs
Cnt = scanner_params['Cnt']
txLUT = scanner_params['txLUT']
axLUT = scanner_params['axLUT']
import time
from niftypet import nipet
# from niftypet.nipet.sct import mmrsct
# from niftypet.nipet.prj import mmrhist
log.debug('reconstruction in mode:%d' % recmod)
# get object and hardware mu-maps
muh, muo = mumaps
# get the GPU version of the image dims
mus = mmrimg.convert2dev(muo+muh, Cnt)
if Cnt['SPN']==1:
snno = Cnt['NSN1']
elif Cnt['SPN']==11:
snno = Cnt['NSN11']
# remove gaps from the prompt sino
psng = mmraux.remgaps(hst['psino'], txLUT, Cnt)
#=========================================================================
# GET NORM
#-------------------------------------------------------------------------
if normcomp is None:
ncmp, _ = mmrnorm.get_components(datain, Cnt)
else:
ncmp = normcomp
log.warning('using user-defined normalisation components')
nsng = mmrnorm.get_sinog(datain, hst, axLUT, txLUT, Cnt, normcomp=ncmp)
#=========================================================================
#=========================================================================
# ATTENUATION FACTORS FOR COMBINED OBJECT AND BED MU-MAP
#-------------------------------------------------------------------------
#> combine attenuation and norm together depending on reconstruction mode
if recmod==0:
asng = np.ones(psng.shape, dtype=np.float32)
else:
#> check if the attenuation sino is given as an array
if isinstance(attnsino, np.ndarray) \
and attnsino.shape==(Cnt['NSN11'], Cnt['NSANGLES'], Cnt['NSBINS']):
asng = mmraux.remgaps(attnsino, txLUT, Cnt)
log.info('using provided attenuation factor sinogram')
elif isinstance(attnsino, np.ndarray) \
and attnsino.shape==(Cnt['Naw'], Cnt['NSN11']):
asng = attnsino
log.info('using provided attenuation factor sinogram')
else:
asng = np.zeros(psng.shape, dtype=np.float32)
petprj.fprj(asng, mus, txLUT, axLUT, np.array([-1], dtype=np.int32), Cnt, 1)
#> combine attenuation and normalisation
ansng = asng*nsng
#=========================================================================
#=========================================================================
# Randoms
#-------------------------------------------------------------------------
if isinstance(randsino, np.ndarray):
rsino = randsino
rsng = mmraux.remgaps(randsino, txLUT, Cnt)
else:
rsino, snglmap = nipet.randoms(hst, scanner_params)
rsng = mmraux.remgaps(rsino, txLUT, Cnt)
#=========================================================================
#=========================================================================
# SCAT
#-------------------------------------------------------------------------
if recmod==2:
if sctsino.size>0:
ssng = mmraux.remgaps(sctsino, txLUT, Cnt)
elif sctsino.size==0 and os.path.isfile(datain['em_crr']):
emd = nimpa.getnii(datain['em_crr'])
ssn = nipet.vsm(
datain,
mumaps,
emd['im'],
hst,
rsino,
scanner_params,
prcnt_scl=2*Cnt['ETHRLD'],
emmsk=False)
ssng = mmraux.remgaps(ssn, txLUT, Cnt)
else:
raise ValueError(
"No emission image available for scatter estimation! " +
" Check if it's present or the path is correct.")
else:
ssng = np.zeros(rsng.shape, dtype=rsng.dtype)
#=========================================================================
log.debug('------ OSEM (%d) -------' % itr)
#------------------------------------
Sn = 14 # number of subsets
#-get one subset to get number of projection bins in a subset
Sprj, s = get_subsets14(0,scanner_params)
Nprj = len(Sprj)
#-init subset array and sensitivity image for a given subset
sinoTIdx = np.zeros((Sn, Nprj+1), dtype=np.int32)
#-init sensitivity images for each subset
imgsens = np.zeros((Sn, Cnt['SZ_IMY'], Cnt['SZ_IMX'], Cnt['SZ_IMZ']), dtype=np.float32)
for n in range(Sn):
sinoTIdx[n,0] = Nprj #first number of projection for the given subset
sinoTIdx[n,1:], s = get_subsets14(n,scanner_params)
# sensitivity image
petprj.bprj(imgsens[n,:,:,:], ansng[sinoTIdx[n,1:],:], txLUT, axLUT, sinoTIdx[n,1:], Cnt )
#-------------------------------------
#-mask for reconstructed image. anything outside it is set to zero
msk = mmrimg.get_cylinder(Cnt, rad=mask_radius, xo=0, yo=0, unival=1, gpu_dim=True)>0.9
#-init image
img =
|
np.ones((Cnt['SZ_IMY'], Cnt['SZ_IMX'], Cnt['SZ_IMZ']), dtype=np.float32)
|
numpy.ones
|
# coding: utf-8
# Distributed under the terms of the MIT License.
from __future__ import division
import numpy as np
from itertools import product
# import collections
from ababe.stru.element import Specie, GhostSpecie
from ababe.stru.site import Site
from itertools import combinations
from scipy.spatial import cKDTree
from operator import itemgetter
from collections import MutableSequence
import spglib
import xxhash
class SitesGrid(object):
"""
Grid object used to construct the grid on which the atoms are placed,
like a chess board.
"""
def __init__(self, sites):
self._sites = sites
self._depth = len(sites)
self._width = len(sites[0])
self._length = len(sites[0][0])
@classmethod
def sea(cls, depth, width, length, sp=GhostSpecie()):
sites = [[[sp for _ in range(length)]
for _ in range(width)]
for _ in range(depth)]
return cls(sites)
@property
def sites(self):
return self._sites
@property
def depth(self):
return self._depth
@property
def width(self):
return self._width
@property
def length(self):
return self._length
def __getitem__(self, pos):
d, w, l = pos
return self._sites[d][w][l]
def __setitem__(self, pos, sp):
d, w, l = pos
self._sites[d][w][l] = sp
def __eq__(self, other):
if other is None:
return False
return self._sites == other._sites
def deepCopy(self):
g = SitesGrid(self._sites)
g._sites = [[row[:] for row in plane] for plane in self._sites]  # copy the nested site lists
return g
def to_array(self):
mfunc = np.vectorize(lambda sp: sp.Z)
arr = mfunc(np.array(self._sites))
return arr
@classmethod
def from_array(cls, arr):
mfunc = np.vectorize(lambda n: Specie.from_num(n))
sarr = mfunc(arr)
return cls(sarr.tolist())
@classmethod
def random_fill(cls, bsp, size, sp):
# d, w, l = size
rarr = (sp.Z - bsp.Z)*np.random.randint(2, size=size)
sarr = np.zeros(size, dtype=int)+bsp.Z
arr = sarr + rarr
return cls.from_array(arr)
@classmethod
def gen_speckle(cls, ssp, size, sp, noa):
d, w, l = size
n = d * w * l
i_sea = ssp.Z
i_speckle = sp.Z
for w_on in combinations(range(n), noa):
out = [i_sea]*n
for index in w_on:
out[index] = i_speckle
arr = np.array(out, dtype=int).reshape(size)
yield cls.from_array(arr)
class CStru(object):
def __init__(self, m, sg):
self._matrix = m
self._sites_grid = sg
self.depth = sg.depth
self.width = sg.width
self.length = sg.length
@property
def m(self):
return self._matrix
@property
def sites_grid(self):
return self._sites_grid
# @property
# def depth(self):
# return self.sites_grid.depth
# @property
# def width(self):
# return self.sites_grid.width
# @property
# def length(self):
# return self.sites_grid.length
# def get_grid(self):
# return self._sites_grid.sites
def get_array(self):
return self._sites_grid.to_array()
def __eq__(self, other):
if other is None:
return False
return other.m == self.m and other.sites_grid == self.sites_grid
@classmethod
def from_array(cls, m, arr):
return cls(m, SitesGrid.from_array(arr))
@classmethod
def gen_speckle(cls, m, ssp, size, sp, noa):
for stru in SitesGrid.gen_speckle(ssp, size, sp, noa):
yield cls(m, stru)
@staticmethod
def _yield_position(d, w, l):
for c in range(d):
for b in range(w):
for a in range(l):
yield [c, b, a]
def get_cell(self):
# from fractions import Fraction
marr = np.array(self._matrix, dtype=np.float64).reshape((3, 3))
g_arr = self._sites_grid.to_array()
d = self.depth
w = self.width
l = self.length
        arr_bas = marr*np.array([d, w, l], dtype=int).reshape((3, 1))
grid_position = np.array([p for p in CStru._yield_position(d, w, l)])
frac = np.array([1/d, 1/w, 1/l], dtype=np.float64).reshape((1, 3))
# round_frac = np.around(frac, decimals=22)
arr_pos = grid_position * frac
arr_num = np.array([i for i in g_arr.flat])
return (arr_bas, arr_pos, arr_num)
def get_gcell(self):
spg_cell = self.get_cell()
gcell = GeneralCell(spg_cell[0], spg_cell[1], spg_cell[2])
return gcell
def get_lattice(self):
arr_bas, arr_pos, arr_num = self.get_cell()
return arr_bas
def get_positions(self):
arr_bas, arr_pos, arr_num = self.get_cell()
return arr_pos
def get_atoms(self):
arr_bas, arr_pos, arr_num = self.get_cell()
return arr_num
@staticmethod
def get_id_matrix(cell, d, w, l):
arr_num = cell[2]
return arr_num.reshape((d, w, l))
def get_midpoint(self):
d = self.depth
w = self.width
l = self.length
return (d//2, w//2, l//2)
# @staticmethod
# def _pos2coor(pos):
# a, b = np.array(self.m)
# x, y = pos
# coor = a*x + b*y # an array
# return tuple(coor)
def get_neighbors(self, pos, delta):
def _pos2coor(pos):
a, b, c = np.array(self.m)
x, y, z = pos
coor = a*x + b*y + c*z # an array
return tuple(coor)
def p_gen():
for z in range(self.depth):
for x in range(self.width):
for y in range(self.length):
yield(x, y, z)
point = _pos2coor(pos)
# w = self.width
# l = self.length
coor_map = {p: _pos2coor(p) for p in p_gen()}
del coor_map[pos]
points = list(coor_map.values())
points_tree = cKDTree(points)
ind = points_tree.query_ball_point(point, delta)
neighbors = itemgetter(*ind)(list(coor_map.keys()))
return set(neighbors)
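# A companion sketch for CStru above (same assumptions as the SitesGrid example):
# wrap a grid in a crystal structure with an assumed unit-cube lattice matrix and
# extract the spglib-style (lattice, positions, numbers) cell tuple.
def _example_cstru():
    m = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]          # assumed lattice vectors
    grid = SitesGrid.sea(1, 2, 2, GhostSpecie())
    arr_bas, arr_pos, arr_num = CStru(m, grid).get_cell()
    return arr_bas, arr_pos, arr_num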
class GeneralCell(object):
"""
    A cell data structure used to generate all non-duplicated structures.
    Initialized by three numpy arrays (lattice, positions, numbers).
"""
def __init__(self, lattice, positions, numbers, symprec=1e-3):
self._lattice = lattice
init_index = self._get_new_id_seq(positions, numbers)
        # The following two lines sort positions and numbers
self._positions = positions[init_index]
self._numbers = numbers[init_index]
self._spg_cell = (self._lattice, self._positions, self._numbers)
self._num_count = numbers.size
self.symprec = symprec
def get_speckle_num(self, sp):
from collections import Counter
num = Counter(self.numbers)[sp.Z]
# num = num_count[atom]
return num
@staticmethod
def _get_new_id_seq(pos, numbers):
"""
        A helper function to produce the new ordering of the transformed
        structure. The algorithm sorts the positions back to a canonical
        order and uses the resulting index to reorder the numbers.
"""
# transfer the atom position into >=0 and <=1
pos = np.around(pos, decimals=3)
func_tofrac = np.vectorize(lambda x: round((x % 1), 3))
o_pos = func_tofrac(pos)
# round_o_pos = np.around(o_pos, decimals=3)
# z, y, x = round_o_pos[:, 2], round_o_pos[:, 1], round_o_pos[:, 0]
z, y, x = o_pos[:, 2], o_pos[:, 1], o_pos[:, 0]
inds = np.lexsort((z, y, x))
return inds
@property
def spg_cell(self):
return self._spg_cell
@property
def lattice(self):
return self._lattice
@property
def positions(self):
return self._positions
@property
def numbers(self):
return self._numbers
@numbers.setter
def numbers(self, arr_numbers):
self._numbers = arr_numbers
@property
def comment(self):
from collections import Counter, OrderedDict
atoms_name_list = list(map(lambda x: Specie.to_name(x),
list(self.numbers)))
d = Counter(atoms_name_list)
ordered_atoms = OrderedDict(sorted(d.items(),
key=lambda x: Specie(x[0]).Z))
if 'G' in ordered_atoms:
del ordered_atoms['G']
comment = ''.join(['{}{}'.format(k, v)
for k, v in ordered_atoms.items()])
return comment
@property
def num_count(self):
"""
number of atoms
"""
return self._num_count
def get_degeneracy(self, sym_perm):
"""
        The input sym_perm is the symmetry permutation table
        of the parent structure.
"""
pool = dict()
for sym in sym_perm:
numbers_new = self.numbers[sym]
n_id = self.get_hash(numbers_new)
pool[n_id] = None
return len(pool)
@property
def id(self):
num_id = xxhash.xxh64(self.numbers).intdigest()
return num_id
@staticmethod
def get_hash(numbers):
return xxhash.xxh64(numbers).intdigest()
def get_spacegroup(self, sym=1e-3):
return spglib.get_spacegroup(self._spg_cell, symprec=sym)
def get_symmetry(self):
"""
Symmetry operations are obtained as a dictionary.
The key rotation contains a numpy array of integer,
which is “number of symmetry operations” x “3x3 matrices”.
The key translation contains a numpy array of float,
which is “number of symmetry operations” x “vectors”.
"""
symmetry = spglib.get_symmetry(self._spg_cell, symprec=self.symprec)
return symmetry
def get_symmetry_permutation(self):
"""
        Get the permutation-group operators of this cell,
        represented as a table.
"""
sym_perm = []
numbers = [i for i in range(self.num_count)]
sym_mat = spglib.get_symmetry(self._spg_cell, symprec=self.symprec)
ops = [(r, t) for r, t in zip(sym_mat['rotations'],
sym_mat['translations'])]
for r, t in ops:
pos_new = np.transpose(np.matmul(r, self._positions.T)) + t
perm = self._get_new_id_seq(pos_new, numbers)
sym_perm.append(perm)
return sym_perm
def get_wyckoffs(self):
symdb = spglib.get_symmetry_dataset(self._spg_cell, symprec=self.symprec)
return symdb['wyckoffs']
@classmethod
def from_poscar(cls, poscar_file):
pass
def is_primitive(self):
primitive_cell = spglib.find_primitive(self.spg_cell, symprec=self.symprec)
return primitive_cell[2].size == self.spg_cell[2].size
def get_refined_cell(self):
"""
        Use spglib's standardize_cell method to refine the given cell.
        If self is a non-primitive cell, the number of atoms will be
        reduced; otherwise a refined cell is returned.
"""
rcell = (self.lattice, self.positions, self.numbers)
lattice, positions, numbers = spglib.standardize_cell(rcell, to_primitive=False,
no_idealize=False, symprec=self.symprec)
return self.__class__(lattice, positions, numbers)
def get_refined_pcell(self):
"""
        Use spglib's standardize_cell method to refine the given cell.
        If self is a non-primitive cell, the number of atoms will be
        reduced; otherwise a refined primitive cell is returned.
"""
rcell = (self.lattice, self.positions, self.numbers)
lattice, positions, numbers = spglib.standardize_cell(rcell, to_primitive=True,
no_idealize=False, symprec=self.symprec)
return self.__class__(lattice, positions, numbers)
def get_shaped_cell(self):
"""
        The number of atoms is not changed, but the lattice shape
        is optimized.
"""
n = self.numbers.size
numbers = self.numbers.copy()
index = np.array([i for i in range(n)])
rcell = (self.lattice, self.positions, index)
lattice, positions, new_index = spglib.standardize_cell(rcell, to_primitive=True,
no_idealize=False, symprec=self.symprec)
numbers = numbers[new_index]
return self.__class__(lattice, positions, numbers)
def get_cartesian(self, ele=None):
"""
        Get the cartesian coordinates of the cell.
        If ele is given, return only the cartesian coordinates
        of the sites occupied by that element.
"""
p = self.positions
if ele is not None:
e = ele.Z
num = np.where(self.numbers == e)[0]
p_target = p[num]
else:
p_target = p
cart_coor = np.matmul(p_target, self.lattice)
return cart_coor
def supercell(self, scale_mat):
"""
Get the supercell of the origin gcell
scale_mat is similar as H matrix in superlattice generator
"""
# return self.__class__(...)
sarr_lat = np.matmul(scale_mat, self.lattice)
# coor_conv_pos = np.matmul(self.positions, self.lattice)
# o_conv_pos = np.matmul(coor_conv_pos, np.linalg.inv(scale_mat))
o_conv_pos = np.matmul(self.positions, np.linalg.inv(scale_mat))
o_pos = self.get_frac_from_mat(scale_mat)
l_of_positions = [i for i in map(lambda x: x+o_pos, list(o_conv_pos))]
pos = np.concatenate(l_of_positions, axis=0)
n = scale_mat.diagonal().prod()
numbers = np.repeat(self.numbers, n)
return self.__class__(sarr_lat, pos, numbers)
@staticmethod
def get_frac_from_mat(scale_mat):
inv = np.linalg.inv
mul = np.matmul
m = np.amax(scale_mat)
int_coor_all = np.array([i for i in product(range(m*3), repeat=3)])
frac_all = mul(int_coor_all, inv(scale_mat))
# frac_all = mul(int_coor_all, inv(h_mat))
# print(frac_all)
is_incell = np.all(((frac_all >= -0.00001) & (frac_all < 0.99999)),
axis=1)
ind = np.where(is_incell)[0]
# pdb.set_trace()
return frac_all[ind]
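# A minimal sketch of GeneralCell above (an illustrative addition, assuming
# spglib is available): a simple-cubic lattice with two carbon atoms given in
# fractional coordinates; get_spacegroup() delegates to spglib on the
# (lattice, positions, numbers) cell tuple.
def _example_general_cell():
    lattice = np.eye(3) * 3.0
    positions = np.array([[0.0, 0.0, 0.0],
                          [0.5, 0.5, 0.5]])
    numbers = np.array([6, 6])
    cell = GeneralCell(lattice, positions, numbers)
    return cell.get_spacegroup()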
class ModifiedCell(MutableSequence):
""" A cell can converted with gcell:
A cell which can be modified, rather than re-created
a new object from class.
Is a special kind of mutable sequence containing only
:class:`Site`.
ALART: all changes are implented in self._sites and
reflect in positions etc.
"""
def __init__(self, lattice, positions=np.array([[0,0,0]]), numbers=np.array([0])):
self._lattice = lattice
lsites = [s for s in zip(positions.tolist(), numbers.tolist())]
self._sites = [Site(s[0], s[1]) for s in lsites]
def __iter__(self):
"""Must be for Sequence ABC,
Iterates over sites.
"""
return self._sites.__iter__()
def __len__(self):
"""Must be for Sequence ABC,
Number of sites in structure.
"""
return len(self._sites)
def __setitem__(self, index, site):
return self._sites.__setitem__(index, site)
def __getitem__(self, index):
return self._sites.__getitem__(index)
def __delitem__(self, index):
return self._sites.__delitem__(index)
def __eq__(self, other):
is_equ = False
if
|
np.allclose(self._lattice, other._lattice)
|
numpy.allclose
|
import warnings
import numpy as np
from numpy import prod as _prod
from scipy import fft as sp_fft
# Aliases for the scipy.fft routines referenced below
sp_fft_fft = sp_fft.fft
sp_fft_rfft = sp_fft.rfft
sp_fft_fftfreq = sp_fft.fftfreq
sp_fft_rfftfreq = sp_fft.rfftfreq
def _extend(M, sym):
"""Extend window by 1 sample if needed for DFT-even symmetry"""
if not sym:
return M + 1, True
else:
return M, False
def _len_guards(M):
"""Handle small or incorrect window lengths"""
if int(M) != M or M < 0:
raise ValueError('Window length M must be a non-negative integer')
return M <= 1
def _truncate(w, needed):
"""Truncate window by 1 sample if needed for DFT-even symmetry"""
if needed:
return w[:-1]
else:
return w
def general_cosine(M, a, sym=True):
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
fac = np.linspace(-np.pi, np.pi, M)
w = np.zeros(M)
for k in range(len(a)):
w += a[k] * np.cos(k * fac)
return _truncate(w, needs_trunc)
def general_hamming(M, alpha, sym=True):
return general_cosine(M, [alpha, 1. - alpha], sym)
def hann(M, sym=True):
# Docstring adapted from NumPy's hanning function
return general_hamming(M, 0.5, sym)
_win_equiv_raw = {
('hanning', 'hann', 'han'): (hann, False),
}
# Fill dict with all valid window name strings
_win_equiv = {}
for k, v in _win_equiv_raw.items():
for key in k:
_win_equiv[key] = v[0]
# Keep track of which windows need additional parameters
_needs_param = set()
for k, v in _win_equiv_raw.items():
if v[1]:
_needs_param.update(k)
def get_window(window, Nx, fftbins=True):
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError) as e:
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, str):
if window in _needs_param:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.") from e
else:
winstr = window
else:
raise ValueError("%s as window type is not supported." %
str(type(window))) from e
try:
winfunc = _win_equiv[winstr]
except KeyError as e:
raise ValueError("Unknown window type.") from e
params = (Nx,) + args + (sym,)
else:
winfunc = hann
params = (Nx, sym)
return winfunc(*params)
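# A minimal sketch of get_window() above (an illustrative addition): in this
# trimmed-down module only the Hann family is registered in _win_equiv, and a
# non-string window argument also falls back to hann(); sizes are arbitrary.
def _example_get_window():
    w_named = get_window('hann', 8)    # looked up through _win_equiv
    w_float = get_window(0.5, 8)       # float input -> hann(8, sym=False)
    return w_named, w_float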
def _triage_segments(window, nperseg, input_length):
# parse window; if array like, then set nperseg = win.shape
if isinstance(window, str) or isinstance(window, tuple):
# if nperseg not specified
if nperseg is None:
nperseg = 256 # then change to default
if nperseg > input_length:
warnings.warn('nperseg = {0:d} is greater than input length '
' = {1:d}, using nperseg = {1:d}'
.format(nperseg, input_length))
nperseg = input_length
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if input_length < win.shape[-1]:
raise ValueError('window is longer than input signal')
if nperseg is None:
nperseg = win.shape[0]
elif nperseg is not None:
if nperseg != win.shape[0]:
raise ValueError("value specified for nperseg is different"
" from length of window")
return win, nperseg
def _median_bias(n):
ii_2 = 2 * np.arange(1., (n-1) // 2 + 1)
return 1 + np.sum(1. / (ii_2 + 1) - 1. / ii_2)
def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1, average='mean'):
"""Estimate power spectral density using Welch's method"""
freqs, Pxx = csd(x, x, fs=fs, window=window, nperseg=nperseg,
noverlap=noverlap, nfft=nfft, detrend=detrend,
return_onesided=return_onesided, scaling=scaling,
axis=axis, average=average)
return freqs, Pxx.real
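# A minimal usage sketch (an illustrative addition): estimate the PSD of a pure
# 50 Hz tone with the welch() defined above. The 1 kHz sampling rate, signal
# length and nperseg value are assumptions for illustration only.
def _example_welch_usage():
    fs = 1000.0                                 # assumed sampling rate [Hz]
    t = np.arange(0, 1.0, 1.0 / fs)             # one second of samples
    x = np.sin(2 * np.pi * 50.0 * t)            # 50 Hz tone
    freqs, Pxx = welch(x, fs=fs, nperseg=256)   # Hann window, 50% overlap
    return freqs[np.argmax(Pxx)]                # expected to lie near 50 Hz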
def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1, average='mean'):
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
if average == 'median':
Pxy = np.median(Pxy, axis=-1) / _median_bias(Pxy.shape[-1])
elif average == 'mean':
Pxy = Pxy.mean(axis=-1)
else:
raise ValueError('average must be "median" or "mean", got %s'
% (average,))
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def even_ext(x, n, axis=-1):
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def odd_ext(x, n, axis=-1):
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_end = axis_slice(x, start=0, stop=1, axis=axis)
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((2 * left_end - left_ext,
x,
2 * right_end - right_ext),
axis=axis)
return ext
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
a_slice = [slice(None)] * a.ndim
a_slice[axis] = slice(start, stop, step)
b = a[tuple(a_slice)]
return b
def axis_reverse(a, axis=-1):
"""Reverse the 1-D slices of `a` along axis `axis`.
Returns axis_slice(a, step=-1, axis=axis).
"""
return axis_slice(a, step=-1, axis=axis)
def const_ext(x, n, axis=-1):
if n < 1:
return x
left_end = axis_slice(x, start=0, stop=1, axis=axis)
ones_shape = [1] * x.ndim
ones_shape[axis] = n
ones = np.ones(ones_shape, dtype=x.dtype)
left_ext = ones * left_end
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = ones * right_end
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def zero_ext(x, n, axis=-1):
if n < 1:
return x
zeros_shape = list(x.shape)
zeros_shape[axis] = n
zeros = np.zeros(zeros_shape, dtype=x.dtype)
ext = np.concatenate((zeros, x, zeros), axis=axis)
return ext
### Helper functions ###
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd', boundary=None,
padded=False):
if mode not in ['psd', 'stft']:
raise ValueError("Unknown value for mode %s, must be one of: "
"{'psd', 'stft'}" % mode)
boundary_funcs = {'even': even_ext,
'odd': odd_ext,
'constant': const_ext,
'zeros': zero_ext,
None: None}
if boundary not in boundary_funcs:
raise ValueError("Unknown boundary option '{0}', must be one of: {1}"
.format(boundary, list(boundary_funcs.keys())))
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is 'stft'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x, y, np.complex64)
else:
outdtype = np.result_type(x, np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError as e:
raise ValueError('x and y cannot be broadcast together.') from e
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
# Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
if nperseg is not None: # if specified by user
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
# parse window; if array like, then set nperseg = win.shape
win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1])
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
# Padding occurs after boundary extension, so that the extended signal ends
# in zeros, instead of introducing an impulse at the end.
# I.e. if x = [..., 3, 2]
# extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
# pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]
if boundary is not None:
ext_func = boundary_funcs[boundary]
x = ext_func(x, nperseg//2, axis=-1)
if not same_data:
y = ext_func(y, nperseg//2, axis=-1)
if padded:
# Pad to integer number of windowed segments
# I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
zeros_shape = list(x.shape[:-1]) + [nadd]
x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
if not same_data:
zeros_shape = list(y.shape[:-1]) + [nadd]
y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools_detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if np.result_type(win, np.complex64) != outdtype:
win = win.astype(outdtype)
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if mode == 'stft':
scale = np.sqrt(scale)
if return_onesided:
if np.iscomplexobj(x):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'twosided'
if sides == 'twosided':
freqs = sp_fft_fftfreq(nfft, 1/fs)
elif sides == 'onesided':
freqs = sp_fft_rfftfreq(nfft, 1/fs)
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
sides)
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
result *= scale
if sides == 'onesided' and mode == 'psd':
if nfft % 2:
result[..., 1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[..., 1:-1] *= 2
time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
nperseg - noverlap)/float(fs)
if boundary is not None:
time -= (nperseg/2) / fs
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'stft':
result = result.real
# Output is going to have new last axis for time/window index, so a
# negative axis index shifts down one
if axis < 0:
axis -= 1
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
return freqs, time, result
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
# Created strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
# https://stackoverflow.com/a/5568169
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
if sides == 'twosided':
func = sp_fft_fft
else:
result = result.real
func = sp_fft_rfft
result = func(result, n=nfft)
return result
def signaltools_detrend(data, axis=-1, type='linear', bp=0, overwrite_data=False):
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = np.asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - np.expand_dims(np.mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = np.sort(np.unique(np.r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = np.r_[axis, 0:axis, axis + 1:rnk]
newdata = np.reshape(np.transpose(data, tuple(newdims)),
(N, _prod(dshape) // N))
if not overwrite_data:
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = np.ones((Npts, 2), dtype)
            A[:, 0] = (np.arange(1, Npts + 1) * 1.0 / Npts).astype(dtype)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s =
|
np.linalg.lstsq(A, newdata[sl])
|
numpy.linalg.lstsq
|
import unittest, pickle, math
import numpy as np
from veritas import *
class TestTree(unittest.TestCase):
def myAssertAlmostEqual(self, a, b, eps=1e-6):
self.assertTrue(type(a) == type(b))
if isinstance(a, list) or isinstance(a, tuple) or isinstance(a, np.ndarray):
self.assertEqual(len(a), len(b))
for x, y in zip(a, b):
self.myAssertAlmostEqual(x, y, eps=eps)
elif isinstance(a, float) or isinstance(a, np.float32) or isinstance(a, np.float64):
self.assertAlmostEqual(a, b, delta=eps)
else:
self.assertEqual(a, b)
def test_tree1(self):
at = AddTree()
t = at.add_tree()
t.split(t.root(), 1, 16.0)
t.set_leaf_value(t.left(t.root()), 1.1)
t.set_leaf_value(t.right(t.root()), 2.2)
y = at.eval(np.array([[1.0, 1.0, 3.0], [1.0, 22.0, 3.0]], dtype=np.float32))
#print(y)
self.myAssertAlmostEqual(np.array([1.1, 2.2], dtype=np.float32), y)
self.assertRaises(RuntimeError, at.compute_box, [1, 2])
self.assertEqual(at.compute_box([1]), {1: Domain.from_hi_exclusive(16.0)})
self.assertEqual(at.compute_box([2]), {1: Domain.from_lo(16.0)})
def test_boolsplit(self):
at = AddTree()
t = at.add_tree()
t.split(t.root(), 0, 2.0)
t.split(t.left(t.root()), 1, 1.0)
t.split(t.right(t.root()), 2)
t.set_leaf_value(t.left(t.left(t.root())), 1.0)
t.set_leaf_value(t.right(t.left(t.root())), 2.0)
t.set_leaf_value(t.left(t.right(t.root())), 4.0)
t.set_leaf_value(t.right(t.right(t.root())), 8.0)
#print(at[0])
self.assertEqual(t.get_split( t.root() ), LtSplit(0, 2.0))
self.assertEqual(t.get_split( t.left(t.root())), LtSplit(1, 1.0))
self.assertEqual(t.get_split(t.right(t.root())), LtSplit(2, BOOL_SPLIT_VALUE))
self.assertEqual(at.compute_box([5]), {0: Domain.from_lo(2.0), 2: FALSE_DOMAIN})
self.assertEqual(at.compute_box([6]), {0: Domain.from_lo(2.0), 2: TRUE_DOMAIN})
T, F = 1.0, 0.0
y = at.eval(np.array([
[0.0, 0.5, F], [0.0, 1.5, T],
[2.5, 0.5, T], [2.5, 0.5, F]], dtype=np.float32))
self.myAssertAlmostEqual(y,
|
np.array([1.0, 2.0, 8.0, 4.0], dtype=np.float32)
|
numpy.array
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os
from DeepPurpose.utils import *
'''
Author: <NAME>
Acknowledgement:
We filter the dataset based on column:
Number of Protein Chains in Target (>1 implies a multichain complex)
Ligand SMILES
PubChem CID
UniProt (SwissProt) Primary ID of Target Chain
Ligand InChI
Ki (nM)
IC50 (nM)
Kd (nM)
EC50 (nM)
'''
def preprocessing_BindingDB(path = None, df = None, y = 'Kd', binary = False, convert_to_log = True, threshold = 30, filter_rough_value = True, save_filtered_dataset_to_file = None, max_smiles_length = None, max_protein_length = None):
'''
The y is one of these types: Kd, IC50, Ki, EC50
'''
if df is not None:
print('Loading Dataset from the pandas input...')
else:
print('Loading Dataset from path...')
if not os.path.isfile(path):
            print('File does not exist: ' + path)
return
df = pd.read_csv(path, sep = '\t', error_bad_lines=False)
print('Beginning Processing...')
df = df[df['Number of Protein Chains in Target (>1 implies a multichain complex)'] == 1.0]
df = df[df['Ligand SMILES'].notnull()]
if y == 'Kd':
idx_str = 'Kd (nM)'
elif y == 'IC50':
idx_str = 'IC50 (nM)'
elif y == 'Ki':
idx_str = 'Ki (nM)'
elif y == 'EC50':
idx_str = 'EC50 (nM)'
else:
print('select Kd, Ki, IC50 or EC50')
return
df_want = df[df[idx_str].notnull()]
df_want = df_want[['BindingDB Reactant_set_id', 'Ligand InChI', 'Ligand SMILES', \
'PubChem CID', 'UniProt (SwissProt) Primary ID of Target Chain', \
'BindingDB Target Chain Sequence', idx_str]]
df_want.rename(columns={'BindingDB Reactant_set_id':'ID',
'Ligand SMILES':'SMILES',
'Ligand InChI':'InChI',
'PubChem CID':'PubChem_ID',
'UniProt (SwissProt) Primary ID of Target Chain':'UniProt_ID',
'BindingDB Target Chain Sequence': 'Target Sequence',
idx_str: 'Label'},
inplace=True)
if filter_rough_value:
# df_want = df_want[df_want['Label'].astype(str).str.contains('>|<') == False]
df_want = df_want[~df_want['Label'].str.contains('>|<', na=False)]
else:
df_want['Label'] = df_want['Label'].str.replace('>', '')
df_want['Label'] = df_want['Label'].str.replace('<', '')
df_want['Label'] = df_want['Label'].astype(float)
# have at least uniprot or pubchem ID
df_want = df_want[df_want.PubChem_ID.notnull() | df_want.UniProt_ID.notnull()]
df_want = df_want[df_want.InChI.notnull()]
df_want = df_want[(df_want.Label > 0) & (df_want.Label <= 10000000.0)]
# filter the records with too long SMILES or protein sequences
if max_smiles_length is not None:
df_want = df_want[df_want.SMILES.apply(lambda x: len(str(x))<max_smiles_length)]
if max_protein_length is not None:
df_want = df_want[df_want['Target Sequence'].apply(lambda x: len(str(x))<max_protein_length)]
print('There are ' + str(len(df_want)) + ' drug target pairs.')
if binary:
        print('The default binary threshold for the binding affinity scores is 30; you can adjust it with the "threshold" parameter')
y = [1 if i else 0 for i in df_want.Label.values < threshold]
else:
if convert_to_log:
print('Default set to logspace (nM -> p) for easier regression')
y = convert_y_unit(df_want.Label.values, 'nM', 'p')
else:
y = df_want.Label.values
if save_filtered_dataset_to_file is not None:
print('Saving filtered dataset to path...')
df_want['LABEL_CONVERTED'] = y
df_want.to_csv(save_filtered_dataset_to_file, sep = '\t', index=False)
return df_want.SMILES.values, df_want['Target Sequence'].values,
|
np.array(y)
|
numpy.array
|
import csv
import numpy as np
import scipy.sparse as sp
def load_data_baseline(path_dataset):
"""Load data in text format, one rating per line."""
data = read_txt(path_dataset)[1:]
return preprocess_data(data)
def read_txt(path):
"""read text file from path."""
with open(path, "r") as f:
return f.read().splitlines()
def preprocess_data(data):
"""preprocessing the text data, conversion to numerical array format."""
def deal_line(line):
pos, rating = line.split(',')
row, col = pos.split("_")
row = row.replace("r", "")
col = col.replace("c", "")
return int(row), int(col), float(rating)
def statistics(data):
"""get stats about the data"""
row = set([line[0] for line in data])
col = set([line[1] for line in data])
return min(row), max(row), min(col), max(col)
# parse each line
data = [deal_line(line) for line in data]
# do statistics on the dataset.
min_row, max_row, min_col, max_col = statistics(data)
print("number of items: {}, number of users: {}".format(max_col, max_row))
# build rating matrix.
ratings = sp.lil_matrix((max_row, max_col))
for row, col, rating in data:
ratings[row - 1, col - 1] = rating
return ratings
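# A minimal sketch of preprocess_data above (an illustrative addition), using
# the "r<row>_c<col>,<rating>" line format that deal_line() expects; the two
# synthetic ratings are assumptions for illustration only.
def _example_preprocess_data():
    lines = ["r1_c1,4.0", "r2_c3,3.5"]
    ratings = preprocess_data(lines)    # 2 x 3 sparse lil_matrix
    return ratings.toarray()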
def load_sample_sub(path_dataset):
"""Load data in text format, one rating per line, as in the competition."""
data = read_txt(path_dataset)[1:]
return preprocess_surprise(data)
def preprocess_surprise(data):
"""preprocessing the text data, conversion to numerical array format."""
def deal_line(line):
pos, rating = line.split(',')
row, col = pos.split("_")
row = row.replace("r", "")
col = col.replace("c", "")
return int(row), int(col), int(rating)
data = [deal_line(line) for line in data]
return
|
np.array(data)
|
numpy.array
|
import numpy as np
import scipy.linalg as scplinalg
import scipy.spatial as scpspatial
from ..utils import to_unit_box
from .kernels import CubicKernel, Kernel
from .surrogate import Surrogate
from .tails import LinearTail, Tail
class RBFInterpolant(Surrogate):
"""Compute and evaluate RBF interpolant.
Manages an expansion of the form
.. math::
s(x) = \\sum_j c_j \\phi(\\|x-x_j\\|) + \\sum_j \\lambda_j p_j(x)
where the functions :math:`p_j(x)` are low-degree polynomials.
The fitting equations are
.. math::
\\begin{bmatrix} \\eta I & P^T \\\\ P & \\Phi+\\eta I \\end{bmatrix}
\\begin{bmatrix} \\lambda \\\\ c \\end{bmatrix} =
\\begin{bmatrix} 0 \\\\ f \\end{bmatrix}
where :math:`P_{ij} = p_j(x_i)` and :math:`\\Phi_{ij}=\\phi(\\|x_i-x_j\\|)`
The regularization parameter :math:`\\eta` allows us to avoid problems
with potential poor conditioning of the system. Consider using the
SurrogateUnitBox wrapper or manually scaling the domain to the unit
hypercube to avoid issues with the domain scaling.
We add k new points to the RBFInterpolant in :math:`O(kn^2)` flops by
updating the LU factorization of the old RBF system. This is better
than computing the RBF coefficients from scratch, which costs
:math:`O(n^3)` flops.
:param dim: Number of dimensions
:type dim: int
:param lb: Lower variable bounds
:type lb: numpy.array
:param ub: Upper variable bounds
:type ub: numpy.array
:param output_transformation: Transformation applied to values before fitting
:type output_transformation: Callable
:param kernel: RBF kernel object
:type kernel: Kernel
:param tail: RBF polynomial tail object
:type tail: Tail
:param eta: Regularization parameter
:type eta: float
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar output_transformation: Transformation to apply to function values before fitting
:ivar num_pts: Number of points in surrogate model
    :ivar X: Points incorporated in surrogate model (num_pts x dim)
:ivar fX: Function values in surrogate model (num_pts x 1)
:ivar updated: True if model is up-to-date (no refit needed)
:ivar kernel: RBF kernel
:ivar tail: RBF tail
:ivar eta: Regularization parameter
"""
def __init__(self, dim, lb, ub, output_transformation=None, kernel=None, tail=None, eta=1e-6):
super().__init__(dim=dim, lb=lb, ub=ub, output_transformation=output_transformation)
if kernel is None or tail is None:
kernel = CubicKernel()
tail = LinearTail(dim)
assert isinstance(kernel, Kernel) and isinstance(tail, Tail)
self.kernel = kernel
self.tail = tail
self.ntail = tail.dim_tail
self.A = None
self.L = None
self.U = None
self.piv = None
self.c = None
self.eta = eta
if kernel.order - 1 > tail.degree:
raise ValueError("Kernel and tail mismatch")
assert self.dim == self.tail.dim
def reset(self):
"""Reset the RBF interpolant."""
super().reset()
self.L = None
self.U = None
self.piv = None
self.c = None
def _fit(self):
"""Compute new coefficients if the RBF is not updated.
We try to update an existing LU factorization by computing a Cholesky
factorization of the Schur complemented system. This may fail if the
system is ill-conditioned, in which case we compute a new LU
factorization.
"""
if not self.updated:
n = self.num_pts
ntail = self.ntail
nact = ntail + n
if self.c is None: # Initial fit
assert self.num_pts >= ntail
X = self._X[0:n, :]
D = scpspatial.distance.cdist(X, X)
Phi = self.kernel.eval(D) + self.eta * np.eye(n)
P = self.tail.eval(X)
# Set up the systems matrix
A1 = np.hstack((np.zeros((ntail, ntail)), P.T))
A2 = np.hstack((P, Phi))
A = np.vstack((A1, A2))
[LU, piv] = scplinalg.lu_factor(A)
self.L = np.tril(LU, -1) + np.eye(nact)
self.U = np.triu(LU)
# Construct the usual pivoting vector so that we can increment
self.piv = np.arange(0, nact)
for i in range(nact):
self.piv[i], self.piv[piv[i]] = self.piv[piv[i]], self.piv[i]
else: # Extend LU factorization
k = self.c.shape[0] - ntail
numnew = n - k
kact = ntail + k
X = self._X[:n, :]
XX = self._X[k:n, :]
D = scpspatial.distance.cdist(X, XX)
Pnew = np.vstack((self.tail.eval(XX).T, self.kernel.eval(D[:k, :])))
Phinew = self.kernel.eval(D[k:, :]) + self.eta *
|
np.eye(numnew)
|
numpy.eye
|
r"""OnlinePelt"""
from math import floor
from numpy import vstack, array
from ruptures.costs import cost_factory
from ruptures.base import BaseCost
from ruptures.exceptions import BadSegmentationParameters
from ruptures.utils import sanity_check
class OnlinePelt:
"""Penalized online change point detection.
For a given model and penalty level, computes the segmentation which
minimizes the constrained sum of approximation errors.
"""
def __init__(self, model="l2", custom_cost=None, min_size=2, jump=5, params=None):
"""Initialize a Pelt instance.
Args:
model (str, optional): segment model, ["l1", "l2", "rbf"]. Not used if ``'custom_cost'`` is not None.
custom_cost (BaseCost, optional): custom cost function. Defaults to None.
min_size (int, optional): minimum segment length.
jump (int, optional): subsample (one every *jump* points).
params (dict, optional): a dictionary of parameters for the cost instance.
"""
if custom_cost is not None and isinstance(custom_cost, BaseCost):
self.cost = custom_cost
else:
if params is None:
self.cost = cost_factory(model=model)
else:
self.cost = cost_factory(model=model, **params)
self.min_size = max(min_size, self.cost.min_size)
self.jump = jump
self.n_samples = None
# initialization
# partitions[t] contains the optimal partition of signal[0:t]
self.partitions = dict() # this dict will be recursively filled
self.partitions[0] = {(0, 0): 0}
self.last_admissible = []
self.last_n = 0
def _seg(self, pen, debug=False):
"""Computes the segmentation for a given penalty using PELT (or a list
of penalties).
Args:
penalty (float): penalty value
Returns:
dict: partition dict {(start, end): cost value,...}
"""
admissible = self.last_admissible
# Recursion
ind = [k for k in range(self.last_n, self.n_samples, self.jump) if k >= self.min_size]
ind += [self.n_samples]
for bkp in ind:
# adding a point to the admissible set from the previous loop.
new_adm_pt = floor((bkp - self.min_size) / self.jump)
new_adm_pt *= self.jump
if new_adm_pt not in admissible:
admissible.append(new_adm_pt)
if debug:
print(f"\tCP: {bkp}, admissible: {admissible}")
subproblems = list()
for t in admissible:
# left partition
try:
tmp_partition = self.partitions[t].copy()
except KeyError: # no partition of 0:t exists. Example: t= 1: 1 < min_size; t=0 is initial case
continue
# we update with the right partition
tmp_partition.update({(t, bkp): self.cost.error(t, bkp) + pen})
subproblems.append(tmp_partition)
if debug:
print(f"\t\t t={t}, subproblem = {subproblems[-1]}")
# finding the optimal partition
self.partitions[bkp] = min(subproblems, key=lambda d: sum(d.values()))
if debug:
print(f"\t\t Best Bkps : {self.partitions[bkp]}")
# trimming the admissible set
admissible = [
t
for t, partition in zip(admissible, subproblems)
if sum(partition.values()) <= sum(self.partitions[bkp].values()) + pen
]
if debug:
print(f"\t\t- new admissible: {admissible}")
self.last_admissible = admissible
self.last_n = self.n_samples
best_partition = self.partitions[self.n_samples].copy()
del best_partition[(0, 0)]
return best_partition
def predict(self, pen, debug=False):
"""Return the optimal breakpoints.
Must be called after the fit method. The breakpoints are associated with the signal passed
to [`fit()`][ruptures.detection.pelt.Pelt.fit].
Raises:
BadSegmentationParameters: in case of impossible segmentation
configuration
Returns:
list: sorted list of breakpoints
"""
# raise an exception in case of impossible segmentation configuration
if self.cost.signal.shape[0] < self.min_size:
return []
if not sanity_check(
n_samples=self.cost.signal.shape[0],
n_bkps=0,
jump=self.jump,
min_size=self.min_size,
):
raise BadSegmentationParameters
partition = self._seg(pen, debug)
bkps = sorted(e for s, e in partition.keys())
return bkps
def update_signal(self, subsignal, debug=False):
"""Set params.
Args:
debug: True for print()-Debugging
subsignal (array): Append subsignal of shape (n_samples, n_features) or (n_samples,).
Returns:
self
"""
s =
|
array(subsignal)
|
numpy.array
|
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import sghmc
# In[2]:
pima = np.genfromtxt('pima-indians-diabetes.data', delimiter=',')
names = ["Number of times pregnant",
"Plasma glucose concentration",
"Diastolic blood pressure (mm Hg)",
"Triceps skin fold thickness (mm)",
"2-Hour serum insulin (mu U/ml)",
"Body mass index (weight in kg/(height in m)^2)",
"Diabetes pedigree function",
"Age (years)",
"Class variable (0 or 1)"]
# In[3]:
# Load data
X = np.concatenate((np.ones((pima.shape[0],1)),pima[:,0:8]), axis=1)
Y = pima[:,8]
Xs = (X -
|
np.mean(X, axis=0)
|
numpy.mean
|
# Copyright 2016 Intel Corporation, Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import re
import warnings
import os.path
import psutil
from .fmrisim import generate_stimfunction, _double_gamma_hrf, convolve_hrf
from sklearn.utils import check_random_state
from scipy.fftpack import fft, ifft
import math
import logging
logger = logging.getLogger(__name__)
"""
Some utility functions that can be used by different algorithms
"""
__all__ = [
"center_mass_exp",
"compute_p_from_null_distribution",
"concatenate_not_none",
"cov2corr",
"ecdf",
"from_tri_2_sym",
"from_sym_2_tri",
"gen_design",
"phase_randomize",
"p_from_null",
"ReadDesign",
"sumexp_stable",
"usable_cpu_count",
]
def from_tri_2_sym(tri, dim):
"""convert a upper triangular matrix in 1D format
to 2D symmetric matrix
Parameters
----------
tri: 1D array
Contains elements of upper triangular matrix
dim : int
The dimension of target matrix.
Returns
-------
symm : 2D array
Symmetric matrix in shape=[dim, dim]
"""
symm = np.zeros((dim, dim))
symm[np.triu_indices(dim)] = tri
return symm
def from_sym_2_tri(symm):
"""convert a 2D symmetric matrix to an upper
triangular matrix in 1D format
Parameters
----------
symm : 2D array
Symmetric matrix
Returns
-------
tri: 1D array
Contains elements of upper triangular matrix
"""
inds = np.triu_indices_from(symm)
tri = symm[inds]
return tri
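# A minimal sketch (an illustrative addition) showing the round trip between
# from_tri_2_sym and from_sym_2_tri above for a 3 x 3 case; note that
# from_tri_2_sym only fills the upper triangle, the lower triangle stays zero.
def _example_tri_sym_round_trip():
    tri = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    symm = from_tri_2_sym(tri, 3)
    tri_back = from_sym_2_tri(symm)   # recovers the original 1D vector
    return symm, tri_back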
def sumexp_stable(data):
"""Compute the sum of exponents for a list of samples
Parameters
----------
data : array, shape=[features, samples]
A data array containing samples.
Returns
-------
result_sum : array, shape=[samples,]
The sum of exponents for each sample divided by the exponent
of the maximum feature value in the sample.
max_value : array, shape=[samples,]
The maximum feature value for each sample.
result_exp : array, shape=[features, samples]
The exponent of each element in each sample divided by the exponent
of the maximum feature value in the sample.
Note
----
This function is more stable than computing the sum(exp(v)).
    It is useful for computing the softmax_i(v) = exp(v_i)/sum(exp(v)) function.
"""
max_value = data.max(axis=0)
result_exp = np.exp(data - max_value)
result_sum = np.sum(result_exp, axis=0)
return result_sum, max_value, result_exp
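# A minimal sketch of sumexp_stable above (an illustrative addition) on a
# 2-feature, 2-sample array; dividing result_exp by result_sum recovers the
# softmax probabilities per sample (each column sums to 1). Values are arbitrary.
def _example_sumexp_stable():
    data = np.array([[1.0, 10.0],
                     [2.0, 20.0]])
    result_sum, max_value, result_exp = sumexp_stable(data)
    softmax = result_exp / result_sum
    return softmax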
def concatenate_not_none(l, axis=0):
"""Construct a numpy array by stacking not-None arrays in a list
Parameters
----------
data : list of arrays
The list of arrays to be concatenated. Arrays have same shape in all
but one dimension or are None, in which case they are ignored.
axis : int, default = 0
Axis for the concatenation
Returns
-------
data_stacked : array
The resulting concatenated array.
"""
# Get the indexes of the arrays in the list
mask = []
for i in range(len(l)):
if l[i] is not None:
mask.append(i)
# Concatenate them
l_stacked = np.concatenate([l[i] for i in mask], axis=axis)
return l_stacked
def cov2corr(cov):
"""Calculate the correlation matrix based on a
covariance matrix
Parameters
----------
cov: 2D array
Returns
-------
corr: 2D array
correlation converted from the covarince matrix
"""
assert cov.ndim == 2, 'covariance matrix should be 2D array'
inv_sd = 1 / np.sqrt(np.diag(cov))
corr = cov * inv_sd[None, :] * inv_sd[:, None]
return corr
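# A minimal sketch of cov2corr above (an illustrative addition) on an assumed
# 2 x 2 covariance matrix; the diagonal of the returned correlation matrix is
# exactly 1 and the off-diagonal entries equal 2 / (2 * 3) = 1/3.
def _example_cov2corr():
    cov = np.array([[4.0, 2.0],
                    [2.0, 9.0]])
    return cov2corr(cov)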
class ReadDesign:
"""A class which has the ability of reading in design matrix in .1D file,
generated by AFNI's 3dDeconvolve.
Parameters
----------
fname: string, the address of the file to read.
    include_orth: Boolean, whether to include "orthogonal" regressors in
the nuisance regressors which are usually head motion parameters.
All the columns of design matrix are still going to be read in,
but the attribute cols_used will reflect whether these orthogonal
        regressors are to be included for further analysis.
Note that these are not entered into design_task attribute which
include only regressors related to task conditions.
    include_pols: Boolean, whether to include polynomial regressors in
the nuisance regressors which are used to capture slow drift of
signals.
Attributes
----------
design: 2d array. The design matrix read in from the csv file.
design_task: 2d array. The part of design matrix corresponding to
task conditions.
n_col: number of total columns in the design matrix.
column_types: 1d array. the types of each column in the design matrix.
0 for orthogonal regressors (usually head motion parameters),
-1 for polynomial basis (capturing slow drift of signals),
values > 0 for stimulus conditions
    n_basis: scalar. The number of polynomial bases in the design matrix.
n_stim: scalar. The number of stimulus conditions.
    n_orth: scalar. The number of orthogonal regressors (usually head
motions)
StimLabels: list. The names of each column in the design matrix.
"""
def __init__(self, fname=None, include_orth=True, include_pols=True):
if fname is None:
# fname is the name of the file to read in the design matrix
self.design = np.zeros([0, 0])
self.n_col = 0
# number of columns (conditions) in the design matrix
self.column_types = np.ones(0)
self.n_basis = 0
self.n_stim = 0
self.n_orth = 0
self.StimLabels = []
else:
# isAFNI = re.match(r'.+[.](1D|1d|txt)$', fname)
filename, ext = os.path.splitext(fname)
# We assume all AFNI 1D files have extension of 1D or 1d or txt
if ext in ['.1D', '.1d', '.txt']:
self.read_afni(fname=fname)
self.include_orth = include_orth
self.include_pols = include_pols
# The two flags above dictates whether columns corresponding to
# baseline drift modeled by polynomial functions of time and
# columns corresponding to other orthogonal signals (usually motion)
# are included in nuisance regressors.
self.cols_task = np.where(self.column_types == 1)[0]
self.design_task = self.design[:, self.cols_task]
if np.ndim(self.design_task) == 1:
self.design_task = self.design_task[:, None]
# part of the design matrix related to task conditions.
self.n_TR = np.size(self.design_task, axis=0)
self.cols_nuisance = np.array([])
if self.include_orth:
self.cols_nuisance = np.int0(
np.sort(np.append(self.cols_nuisance,
np.where(self.column_types == 0)[0])))
if self.include_pols:
self.cols_nuisance = np.int0(
np.sort(np.append(self.cols_nuisance,
np.where(self.column_types == -1)[0])))
if np.size(self.cols_nuisance) > 0:
self.reg_nuisance = self.design[:, self.cols_nuisance]
if np.ndim(self.reg_nuisance) == 1:
self.reg_nuisance = self.reg_nuisance[:, None]
else:
self.reg_nuisance = None
# Nuisance regressors for motion, baseline, etc.
def read_afni(self, fname):
# Read design file written by AFNI
self.n_basis = 0
self.n_stim = 0
self.n_orth = 0
self.StimLabels = []
self.design =
|
np.loadtxt(fname, ndmin=2)
|
numpy.loadtxt
|
import numpy as np
import cv2
SEAM_COLOR = np.array([100, 100, 250])
def showPicture(im, show_seam_mask=None, rotate=False,picture_name = "picture",stop = False):
vis = im.astype(np.uint8)
if show_seam_mask is not None:
vis[np.where(show_seam_mask == False)] = SEAM_COLOR
if rotate:
vis = rotateImg(vis, False)
cv2.imshow(picture_name, vis)
if stop:
cv2.waitKey(0)
cv2.waitKey(1)
return vis
def resize(image, width):
"""
    Resize the image to the given width, preserving the aspect ratio.
"""
dim = None
h, w = image.shape[:2]
dim = (width, int(h * width / float(w)))
return cv2.resize(image, dim)
def rotateImg(image, clockwise):
k = 1 if clockwise else 3
return
|
np.rot90(image, k)
|
numpy.rot90
|
#!/usr/bin/env python
# coding: utf-8
# # Vector manipulation in Python
#
# In this lab, you will have the opportunity to practice once again with the NumPy library. This time, we will explore some advanced operations with arrays and matrices.
#
# At the end of the previous module, we used PCA to transform a set of many variables into a set of only two uncorrelated variables. This process was carried out through a transformation of the data called rotation.
#
# In this week's assignment, you will need to find a transformation matrix from English to French vector space embeddings. Such a transformation matrix is nothing else but a matrix that rotates and scales vector spaces.
#
# In this notebook, we will explain in detail the rotation transformation.
# ## Transforming vectors
#
# There are three main vector transformations:
# * Scaling
# * Translation
# * Rotation
#
# In previous notebooks, we have applied the first two kinds of transformations. Now, let us learn how to use a fundamental transformation on vectors called _rotation_.
#
# The rotation operation changes the direction of a vector while leaving its dimensionality and its norm unaffected. Let us explain with some examples.
#
# In the following cells, we will define a NumPy matrix and a NumPy array. Soon we will explain how this is related to matrix rotation.
# In[1]:
import numpy as np # Import numpy for array manipulation
import matplotlib.pyplot as plt # Import matplotlib for charts
from utils_nb import plot_vectors # Function to plot vectors (arrows)
# ### Example 1
# In[2]:
# Create a 2 x 2 matrix
R = np.array([[2, 0],
[0, -2]])
# In[3]:
x = np.array([[1, 1]]) # Create a 1 x 2 matrix
# The dot product between a vector and a square matrix produces a rotation and a scaling of the original vector.
#
# Remember that our recommended way to get the dot product in Python is np.dot(a, b):
# In[4]:
y = np.dot(x, R) # Apply the dot product between x and R
y
# We are going to use Pyplot to inspect the effect of the rotation on 2D vectors visually. For that, we have created a function `plot_vectors()` that takes care of all the intricate parts of the visual formatting. The code for this function is inside the `utils_nb.py` file.
#
# Now we can plot the vector $\vec x = [1, 1]$ in a cartesian plane. The cartesian plane will be centered at `[0,0]` and its x and y limits will be between `[-4, +4]`
# In[5]:
plot_vectors([x], axes=[4, 4], fname='transform_x.svg')
# Now, let's plot in the same system our vector $\vec x = [1, 1]$ and its dot product with the matrix
#
# $$Ro = \begin{bmatrix} 2 & 0 \\ 0 & -2 \end{bmatrix}$$
#
# $$y = x \cdot Ro = [[2, -2]]$$
# In[6]:
plot_vectors([x, y], axes=[4, 4], fname='transformx_and_y.svg')
# Note that the original vector `x` is transformed into the output vector `y` (blue).
# ### Example 2
#
# We are going to use Pyplot to inspect the effect of the rotation on 2D vectors visually. For that, we have created a function that takes care of all the intricate parts of the visual formatting. The following procedure plots an arrow within a Pyplot canvas.
#
# Data that is composed of 2 real attributes is said to belong to an $R \times R$ or $R^2$ space. Rotation matrices in $R^2$ rotate a given vector $\vec x$ by a counterclockwise angle $\theta$ in a fixed coordinate system. Rotation matrices are of the form:
#
# $$Ro = \begin{bmatrix} cos \theta & -sin \theta \\ sin \theta & cos \theta \end{bmatrix}$$
#
# **(Note:** This notebook uses $$y = x \cdot Ro$$ But if you use $$y = Ro \cdot x.T$$
#
# Then the rotation matrices in $R^2$ rotate a given vector $\vec x$ by a clockwise angle $\theta$ in a fixed coordinate system.**)**
#
# The trigonometric functions in NumPy require the angle in radians, not in degrees. In the next cell, we define a rotation matrix that rotates vectors by $100^\circ$.
# In[7]:
angle = 100 * (np.pi / 180) #convert degrees to radians
Ro = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
x2 = np.array([2, 2]).reshape(1, -1) # make it a row vector
y2 = np.dot(x2, Ro)
print('Rotation matrix')
print(Ro)
print('\nRotated vector')
print(y2)
print('\n x2 norm',
|
np.linalg.norm(x2)
|
numpy.linalg.norm
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom training loops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.util import nest
def get_dataset_from_tensor_slices(inp_array):
dataset = dataset_ops.DatasetV2.from_tensor_slices(inp_array)
# TODO(b/138326910): Remove Dataset V1 version once bug resolved.
if not tf2.enabled():
dataset = dataset_ops.Dataset.from_tensor_slices(inp_array)
return dataset
class AssertFlattenedMixin(object):
"""Mixin for specialized asserts."""
def assert_equal_flattened(self, expected_results, actual_results):
"""Asserts that flattened results are equal.
Due to the number of replicas in the strategy, the output may have a
different structure and needs to be flattened for comparison.
Args:
expected_results: The results expected as a result of a computation.
actual_results: The actual results of a computation.
"""
self.assertEqual(len(expected_results), len(actual_results))
for i, expected_result in enumerate(expected_results):
final_result = []
actual_result = actual_results[i]
for val in actual_result:
final_result.extend(val.numpy())
self.assertAllEqual(expected_result, final_result)
class InputIterationTest(test.TestCase, parameterized.TestCase,
AssertFlattenedMixin):
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testConstantNumpyInput(self, distribution):
@def_function.function
def run(x):
def computation(x):
return math_ops.square(x)
outputs = distribution.experimental_local_results(
distribution.experimental_run_v2(computation, args=(x,)))
return outputs
self.assertAllEqual(
constant_op.constant(4., shape=(distribution.num_replicas_in_sync)),
run(2.))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testStatefulExperimentalRunAlwaysExecute(self, distribution):
with distribution.scope():
v = variables.Variable(
0.0, aggregation=variables.VariableAggregation.MEAN)
@def_function.function
def train_step():
def assign_add():
v.assign_add(1.0)
distribution.experimental_run_v2(assign_add)
return array_ops.zeros([])
train_step()
self.assertAllEqual(1.0, v.numpy())
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.strategies_minus_tpu,
mode=["eager"]))
def testFullEager(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
def train_step(data):
return math_ops.square(data)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
results = []
for x in dist_dataset:
output = distribution.experimental_local_results(
distribution.experimental_run_v2(train_step, args=(x,)))
results.append(output)
self.assert_equal_flattened([[25., 36.], [49., 64.]], results)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testStepInFunction(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
@def_function.function
def train_step(data):
return math_ops.square(data)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
results = []
for x in dist_dataset:
output = distribution.experimental_local_results(
distribution.experimental_run_v2(train_step, args=(x,)))
results.append(output)
self.assert_equal_flattened([[25., 36.], [49., 64.]], results)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testRunInFunction(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
def train_step(data):
return math_ops.square(data)
@def_function.function
def f_train_step(input_data):
return distribution.experimental_local_results(
distribution.experimental_run_v2(train_step, args=(input_data,)))
dist_dataset = distribution.experimental_distribute_dataset(dataset)
results = []
for x in dist_dataset:
output = f_train_step(x)
results.append(output)
self.assert_equal_flattened([[25., 36.], [49., 64.]], results)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy
],
mode=["eager"]))
def testNestedOutput(self, distribution):
dataset = get_dataset_from_tensor_slices([0, 1, 2, 3]).batch(2)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(iterator):
def computation(x):
return [{
"a": x - 1,
"b": x + 1
}]
inputs = next(iterator)
outputs = distribution.experimental_run_v2(computation, args=(inputs,))
return nest.map_structure(distribution.experimental_local_results,
outputs)
results = run(input_iterator)
for replica in range(distribution.num_replicas_in_sync):
# The input dataset is range(4), so the replica id is same as input.
self.assertAllEqual(results[0]["a"][replica], [replica - 1])
self.assertAllEqual(results[0]["b"][replica], [replica + 1])
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testRunInFunctionAutoGraphApplication(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
def train_step(data):
return math_ops.square(data)
@def_function.function
def f_train_step(input_data):
return distribution.experimental_local_results(
distribution.experimental_run_v2(train_step, args=(input_data,)))
dist_dataset = distribution.experimental_distribute_dataset(dataset)
results = []
for x in dist_dataset:
output = f_train_step(x)
results.append(output)
self.assert_equal_flattened([[25., 36.], [49., 64.]], results)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testDatasetIterationInFunction(self, distribution):
with distribution.scope():
a = variables.Variable(
1.0, aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA)
def train_step(_):
a.assign_add(1.0)
@def_function.function
def f_train_step(dist_dataset):
number_of_steps = constant_op.constant(0.0)
product_of_means = constant_op.constant(2.0)
for x in dist_dataset: # loop with values modified each iteration
number_of_steps += 1
product_of_means *= math_ops.cast(
distribution.reduce("MEAN", x, axis=0), product_of_means.dtype)
for y in dist_dataset: # loop with no intermediate state
distribution.experimental_run_v2(train_step, args=(y,))
return number_of_steps, product_of_means
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
number_of_steps, product_of_means = f_train_step(dist_dataset)
self.assertEqual(2, number_of_steps.numpy())
self.assertNear((2 * (5+6)/2 * (7+8)/2), product_of_means.numpy(), 1e-3)
# We set the initial value of `a` to 1 and iterate through the dataset 2
# times(4/2 where 4 is the number of dataset elements and 2 is the batch
# size). Hence the final result is 3.
self.assertEqual(3.0, (a.numpy()))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testDatasetAssertWithDynamicBatch(self, distribution):
# Regression test for github issue 33517.
def step_fn(data):
assert_op = control_flow_ops.Assert(math_ops.less_equal(
math_ops.reduce_max(data), 100.), [data])
with ops.control_dependencies([assert_op]):
return math_ops.square(data)
@def_function.function
def train(dataset):
results = []
iterator = iter(dataset)
      # we iterate through the loop 2 times since we have 3 elements and a
      # global batch of 2 (the second batch is a partial batch of one element).
for _ in range(2):
elem = next(iterator)
output = distribution.experimental_local_results(
distribution.experimental_run_v2(step_fn, args=(elem,)))
results.append(output)
return results
dataset = dataset_ops.DatasetV2.from_tensor_slices([5., 6., 7.,]).batch(2)
# TODO(b/138326910): Remove Dataset V1 version once bug resolved.
if not tf2.enabled():
dataset = dataset_ops.Dataset.from_tensor_slices([5., 6., 7.,]).batch(2)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
results = train(dist_dataset)
expected_results = [[25., 36.], [49.]]
self.assertEqual(len(expected_results), len(results))
# Need to expand results since output will be grouped differently depending
# on the number of replicas.
for i, expected_result in enumerate(expected_results):
final_result = []
actual_result = results[i]
for val in actual_result:
final_result.extend(val.numpy())
self.assertAllEqual(expected_result, final_result)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]
))
def testDynamicShapes(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(iterator):
def computation(x):
return math_ops.reduce_mean(x)
inputs = next(iterator)
outputs = distribution.experimental_local_results(
distribution.experimental_run_v2(computation, args=(inputs,)))
return outputs
# This assumes that there are exactly 2 replicas
self.assertAllEqual([5.5, 7.], run(input_iterator))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]
))
def testDynamicShapesWithGetNextOutsideFunction(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(inputs):
def computation(x):
return math_ops.reduce_mean(x)
outputs = distribution.experimental_local_results(
distribution.experimental_run_v2(computation, args=(inputs,)))
return outputs
# This assumes that there are exactly 2 replicas
self.assertAllEqual([5.5, 7.], run(next(input_iterator)))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]
))
def testStrategyReduceWithDynamicShapes(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(iterator):
inputs = next(iterator)
return distribution.reduce(reduce_util.ReduceOp.MEAN, inputs, axis=0)
self.assertAllEqual(6., run(input_iterator))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]
))
def testStrategyReduceWithDynamicShapesRank2(self, distribution):
dataset = get_dataset_from_tensor_slices(
[[1., 1.], [1., 1.], [1., 1.]]).batch(4)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(iterator):
inputs = next(iterator)
return distribution.reduce(reduce_util.ReduceOp.MEAN, inputs, axis=0)
self.assertAllEqual([1., 1.], run(input_iterator))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]
))
def testDynamicShapesWithSizeOp(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(inputs):
def computation(x):
return array_ops.size_v2(x)
outputs = distribution.experimental_local_results(
distribution.experimental_run_v2(computation, args=(inputs,)))
return outputs
# This assumes that there are exactly 2 replicas
self.assertAllEqual([2, 1], run(next(input_iterator)))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]
))
def testDynamicShapesWithFirstReplicaNotMaximumShape(self, distribution):
def dataset_fn(_):
dataset1 = get_dataset_from_tensor_slices([[1., 2.], [1., 2.]])
dataset2 = get_dataset_from_tensor_slices([[1., 2., 3.],
[1., 2., 3.]])
dataset = dataset1.concatenate(dataset2)
dataset = dataset.batch(2, drop_remainder=True)
return dataset
input_iterator = iter(
distribution.experimental_distribute_datasets_from_function(dataset_fn))
@def_function.function
def run(inputs):
def computation(x):
return math_ops.reduce_mean(x)
outputs = distribution.experimental_local_results(
distribution.experimental_run_v2(computation, args=(inputs,)))
return outputs
# This assumes that there are exactly 2 replicas
self.assertAllEqual([1.5, 2.], run(next(input_iterator)))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testDatasetDistributeEvenlyDivisibleDrop(self, distribution):
# If the batch size is evenly divisible by the number of workers and we set
# drop_remainder=True on the dataset, then DistributedIterator will use a
# different (and more efficient) code path which avoids some control flow
# ops.
dataset = get_dataset_from_tensor_slices([5., 6.]).batch(
2, drop_remainder=True)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
data = next(input_iterator)
expected_result = [5., 6.]
final_result = []
actual_result = distribution.experimental_local_results(data)
for val in actual_result:
final_result.extend(val)
self.assertAllEqual(expected_result, final_result)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testDatasetDistributeNotDivisibleDrop(self, distribution):
# If each batch is not evenly divisible by the number of workers,
# the remainder will be dropped.
dataset = get_dataset_from_tensor_slices([5., 6.]).batch(
1, drop_remainder=True)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
data = next(input_iterator)
expected_result = [5.]
final_result = []
actual_result = distribution.experimental_local_results(data)
for val in actual_result:
final_result.extend(val)
self.assertAllEqual(expected_result, final_result)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testDatasetDistributeEvenlyDivisibleNoDrop(self, distribution):
# Setting drop_remainder=False on the dataset causes DistributedIterator
# to use get_next_as_optional(), even if the batched dataset is evenly
# divisible by the number of workers.
dataset = get_dataset_from_tensor_slices([5., 6.]).batch(
2, drop_remainder=False)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
data = next(input_iterator)
expected_result = [5., 6.]
final_result = []
actual_result = distribution.experimental_local_results(data)
for val in actual_result:
final_result.extend(val)
self.assertAllEqual(expected_result, final_result)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testDatasetPartialBatchWithMixedOutputs(self, distribution):
# Dynamic output size with a mix of static and dynamic outputs
dataset = get_dataset_from_tensor_slices([5.]).batch(2)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(iterator):
def computation(x):
# Fixed size output with a dynamic sized output.
return array_ops.zeros([3]), math_ops.square(x)
return distribution.experimental_run_v2(
computation, args=(next(iterator),))
results = run(input_iterator)
# First result is fixed for all replicas.
for replica_id in range(distribution.num_replicas_in_sync):
self.assertAllEqual([0., 0., 0.],
distribution.experimental_local_results(
results[0])[replica_id])
# Only first replica has distributed dataset computation.
self.assertAllEqual([25.],
distribution.experimental_local_results(results[1])[0])
# Other replicas have no distributed dataset computation.
for replica_id in range(1, distribution.num_replicas_in_sync):
self.assertAllEqual([],
distribution.experimental_local_results(
results[1])[replica_id])
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testIterationInsideFunction(self, distribution):
def step_fn(data):
return math_ops.square(data)
@def_function.function
def train(dataset):
results = []
iterator = iter(dataset)
# we iterate through the loop 2 times since we have 4 elements and a
# global batch of 2.
for _ in range(2):
elem = next(iterator)
output = distribution.experimental_local_results(
distribution.experimental_run_v2(step_fn, args=(elem,)))
results.append(output)
return results
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
results = train(dist_dataset)
self.assert_equal_flattened([[25., 36.], [49., 64.]], results)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testIterationOutsideFunction(self, distribution):
def train_step(data):
return math_ops.square(data)
@def_function.function
def f_train_step(input_data):
return distribution.experimental_local_results(
distribution.experimental_run_v2(train_step, args=(input_data,)))
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
iterator = iter(dist_dataset)
results = []
# we iterate through the loop 2 times since we have 4 elements and a
# global batch of 2.
for _ in range(2):
output = f_train_step(next(iterator))
results.append(output)
self.assert_equal_flattened([[25., 36.], [49., 64.]], results)
class GradientTapeTest(test.TestCase, parameterized.TestCase,
AssertFlattenedMixin):
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testStepInFunctionGradient(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
@def_function.function
def train_step(x):
def computation(x):
return math_ops.square(x)
with backprop.GradientTape() as tape:
tape.watch(x) # Manually watch non-variable tensors.
y = computation(x)
grads = tape.gradient(y, x)
return grads
dist_dataset = distribution.experimental_distribute_dataset(dataset)
results = []
for x in dist_dataset:
output = distribution.experimental_local_results(
distribution.experimental_run_v2(train_step, args=(x,)))
results.append(output)
self.assert_equal_flattened([[10., 12.], [14., 16.]], results)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testRunInFunctionGradient(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
@def_function.function
def run(x):
def train_step(x):
def computation(x):
return math_ops.square(x)
with backprop.GradientTape() as tape:
tape.watch(x) # Manually watch non-variable tensors.
y = computation(x)
grads = tape.gradient(y, x)
return grads
return distribution.experimental_local_results(
distribution.experimental_run_v2(train_step, args=(x,)))
dist_dataset = distribution.experimental_distribute_dataset(dataset)
results = []
for x in dist_dataset:
output = run(x)
results.append(output)
self.assert_equal_flattened([[10., 12.], [14., 16.]], results)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"],
model_in_tf_function=[True, False]
))
def testNestedFunction(self, distribution, model_in_tf_function):
def model(x):
return x * x
if model_in_tf_function:
model = def_function.function(model)
with distribution.scope():
x = variables.Variable(1.0)
@def_function.function
def train_step():
def replica_step():
with backprop.GradientTape() as tape:
y = model(x)
return tape.gradient(y, x)
return distribution.experimental_run_v2(replica_step)
grads = distribution.experimental_local_results(train_step())
self.assertLen(grads, distribution.num_replicas_in_sync)
self.assertTrue(all(g is not None for g in grads))
class KerasModelsTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def test_lstm(self, distribution):
batch_size = 32
def create_lstm_model():
model = keras.models.Sequential()
# We only have LSTM variables so we can detect no gradient issues more
# easily.
model.add(
keras.layers.LSTM(1, return_sequences=False, input_shape=(10, 1)))
return model
def create_lstm_data():
seq_length = 10
x_train = np.random.rand(batch_size, seq_length, 1).astype("float32")
y_train = np.random.rand(batch_size, 1).astype("float32")
return x_train, y_train
x, y = create_lstm_data()
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.batch(batch_size, drop_remainder=True)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
with distribution.scope():
model = create_lstm_model()
optimizer = keras.optimizer_v2.gradient_descent.SGD()
@def_function.function
def train_step(input_iterator):
def step_fn(inputs):
inps, targ = inputs
with backprop.GradientTape() as tape:
output = model(inps)
loss = math_ops.reduce_mean(
keras.losses.binary_crossentropy(
y_true=targ, y_pred=output, from_logits=False))
grads = tape.gradient(loss, model.variables)
optimizer.apply_gradients(zip(grads, model.variables))
return loss
outputs = distribution.experimental_run_v2(
step_fn, args=(next(input_iterator),))
return distribution.experimental_local_results(outputs)
train_step(input_iterator)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies, mode=["eager"]))
def test_nested_tf_functions(self, distribution):
# The test builds two computations with keras layers, one with nested
# tf.function, and the other without nested tf.function. We run these
# computations independently on the model with same weights, and make sure
# the variables are still the same after one training step.
inputs = np.random.random((10, 3)).astype(np.float32)
    targets = np.ones((10, 4), dtype=np.float32)
# _*_ coding: utf-8 _*_
__author__ = '<NAME>'
__date__ = '1/13/2018 5:31 PM'
import numpy as np
A = np.arange(2,14).reshape((3,4))
print(A)
# find index of min, max mean
print(np.argmin(A))
print(np.argmax(A))
print(np.mean(A))
# Same as above
print(A.mean())
# find median
print(np.median(A))
# cumulative sum
np.cumsum(A)
# find differences between consecutive elements
np.diff(A)
# find non zero
np.nonzero(A)
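# Reference results for A = np.arange(2, 14).reshape((3, 4)) (values 2..13):
# np.argmin(A) -> 0 and np.argmax(A) -> 11 (flattened indices of 2 and 13)
# np.mean(A) == np.median(A) == 7.5
# np.cumsum(A) -> [ 2  5  9 14 20 27 35 44 54 65 77 90]
# np.diff(A)   -> consecutive differences along each row, all equal to 1 here
# np.nonzero(A) -> the indices of every element, since no entry is zero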
import numpy as np
import time
from scipy.special import digamma
class ShareNet(object):
def __init__(self,n_components,covariance_prior=None,
mean_prior=None,degrees_of_freedom_prior=None,
init_params='kmeans',random_state=1,beta_0=1):
np.random.seed(random_state)
self.K = n_components
self.init_params = init_params
self.beta_0 = beta_0
if covariance_prior is not None:
self.covariance_prior = covariance_prior
else:
self.covariance_prior = None
if degrees_of_freedom_prior is not None:
self.dof = degrees_of_freedom_prior
else:
self.dof = None
if mean_prior is not None:
self.mu_0 = mean_prior
else:
self.mu_0 = None
def update_m_tilde(self):
first_term = self.phi.dot(np.array([self.means_[k].dot(self.precisions_[k]) \
for k in range(self.K)]))
first_term += self.XdotV
self.m_tilde = (first_term[:,:,np.newaxis]*self.S_tilde).sum(1)
def update_S_tilde(self,use_block):
if use_block:
block_size = 10000
for i in range(0,self.N,block_size):
S_tilde_inv = np.einsum('ij,jkl->ikl',self.phi[i:i+block_size],\
self.precisions_)
N = S_tilde_inv.shape[0]
diag_indices = (np.repeat(np.arange(N),self.C),\
np.tile(np.arange(self.C),N),np.tile(np.arange(self.C),N))
S_tilde_inv[diag_indices] += self.V[i:i+block_size].flatten()
self.S_tilde[i:i+block_size,:,:] = np.linalg.inv(S_tilde_inv)
else:
diag_indices = (np.repeat(np.arange(self.N),self.C),\
np.tile(np.arange(self.C),self.N),np.tile(np.arange(self.C),self.N))
S_tilde_inv = np.einsum('ij,jkl->ikl',self.phi,self.precisions_)
S_tilde_inv[diag_indices] += self.V.flatten()
self.S_tilde = np.linalg.inv(S_tilde_inv)
def update_phi(self):
phi_unnormalized = np.zeros(self.phi.shape)
trace = np.einsum('lij,kji->kl', self.precisions_, self.S_tilde)
for k in range(self.K):
diff = self.m_tilde-self.means_[k]
quad = (diff.dot(self.precisions_[k])*diff).sum(1)
s,logdet = np.linalg.slogdet(self.B_tilde[k])
logdet *= s
digamma_value = sum([digamma((self.dof_tilde[k]+1-i)/2) \
for i in range(1,self.C+1)])
ll = -0.5*(quad + (self.C/(self.beta_0 + self.N_k[k])) + trace[:,k])
ll += 0.5*(logdet + digamma_value)
phi_unnormalized[:,k] = ll
self.phi = np.exp(phi_unnormalized)
self.phi = (self.phi.T/self.phi.sum(1)).T
        # Guard against NaNs produced by the normalization above (the original statement is
        # truncated after the indexing expression; the uniform fill value is an assumption).
        self.phi[np.isnan(self.phi)] = 1.0 / self.K
####Please do not remove lines below####
from lmfit import Parameters
import numpy as np
import sys
import os
from functools import lru_cache
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('./Functions'))
sys.path.append(os.path.abspath('./Fortran_rountines'))
####Please do not remove lines above####
####Import your modules below if needed####
from FormFactors.Sphere import Sphere
from Chemical_Formula import Chemical_Formula
from PeakFunctions import LogNormal, Gaussian
from utils import find_minmax
import time
class Sphere_Double_Layer: #Please put the class name same as the function name
def __init__(self, x=0, Np=50, flux=1e13, dist='Gaussian', Energy=None, relement='Au', NrDep=True, norm=1.0e-4,
sbkg=0.0, cbkg=0.0, abkg=0.0, nearIon='Rb', farIon='Cl', ionDensity=0.0, stThickness=1.0,
stDensity=0.0, dbLength=1.0, dbDensity=0.0,Ndb=20,
mpar={'Multilayers':{'Material': ['Au', 'H2O'], 'Density': [19.32, 1.0], 'SolDensity': [1.0, 1.0],'Rmoles': [1.0, 0.0], 'R': [1.0, 0.0], 'Rsig': [0.0, 0.0]}}):
"""
Documentation
Calculates the Energy dependent form factor of multilayered nanoparticles with different materials
x : Reciprocal wave-vector 'Q' inv-Angs in the form of a scalar or an array
relement : Resonant element of the nanoparticle. Default: 'Au'
Energy : Energy of X-rays in keV at which the form-factor is calculated. Default: None
        Np : No. of points with which the size distribution will be computed. Default: 50
        NrDep : Energy dependence of the non-resonant element. Default: 'True' (Energy dependent), 'False' (Energy independent)
        dist : The probability distribution function for the radii of the different interfaces in the nanoparticles. Default: 'Gaussian'
norm : The density of the nanoparticles in Molar (Moles/Liter)
sbkg : Constant incoherent background for SAXS-term
cbkg : Constant incoherent background for cross-term
abkg : Constant incoherent background for Resonant-term
nearIon : The ionic layer closer to the particle
farIon : The ionic layer farther from the particle
ionDensity : The bulk density of the ions in Moles per liter
stThickness : Thickness of the stern layer
stDensity : Density of the ions in the stern layer in Moles per liter
dbLength : The ratio of decay length and the stern layer thickness
dbDensity : The ratio of maximum density of the debye layer w.r.t the stern layer density
Ndb : Number of layers used to represent the double layer region
        flux : Total X-ray flux, used to simulate the error bars for the fitted data
mpar : Multi-parameter which defines the following including the solvent/bulk medium which is the last one. Default: 'H2O'
Material ('Materials' using chemical formula),
Density ('Density' in gm/cubic-cms),
Density of solvent ('SolDensity' in gm/cubic-cms) of the particular layer
Mole-fraction ('Rmoles') of resonant element in the material)
Radii ('R' in Angs), and
Widths of the distributions ('Rsig' in Angs) of radii of all the interfaces present in the nanoparticle system. Default: [0.0]
"""
if type(x) == list:
self.x = np.array(x)
else:
self.x = x
self.norm = norm
self.sbkg = sbkg
self.cbkg = cbkg
self.abkg = abkg
self.dist = dist
self.nearIon=nearIon
self.farIon=farIon
self.ionDensity=ionDensity
self.stThickness=stThickness
self.stDensity=stDensity
self.dbLength=dbLength
self.dbDensity=dbDensity
self.Ndb=Ndb
self.Np = Np
self.Energy = Energy
self.relement = relement
self.NrDep = NrDep
# self.rhosol=rhosol
self.flux = flux
self.__mpar__ = mpar # If there is any multivalued parameter
self.choices = {'dist': ['Gaussian', 'LogNormal'],
'NrDep': [True, False]} # If there are choices available for any fixed parameters
self.__cf__ = Chemical_Formula()
self.__fit__ = False
self.output_params={'scaler_parameters':{}}
self.__mkeys__=list(self.__mpar__.keys())
self.init_params()
def init_params(self):
"""
Define all the fitting parameters like
self.params.add('sig',value = 0, vary = 0, min = -np.inf, max = np.inf, expr = None, brute_step = None)
"""
self.params = Parameters()
self.params.add('norm', value=self.norm, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('stThickness', value=self.stThickness, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('stDensity',value=self.stDensity, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('dbLength', value=self.dbLength, vary=0, min=1.0, max=np.inf, expr=None, brute_step=0.1)
self.params.add('dbDensity', value=self.dbDensity, vary=0, min=0, max=1, expr=None, brute_step=0.1)
self.params.add('sbkg', value=self.sbkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('cbkg', value=self.cbkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('abkg', value=self.abkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
for mkey in self.__mkeys__:
for key in self.__mpar__[mkey]:
if key != 'Material':
for i in range(len(self.__mpar__[mkey][key])):
self.params.add('__%s_%s_%03d' % (mkey, key, i), value=self.__mpar__[mkey][key][i], vary=0, min=-np.inf,
max=np.inf, expr=None, brute_step=0.1)
@lru_cache(maxsize=10)
def calc_rho(self, R=(1.0, 0.0), material=('Au', 'H2O'), relement='Au', density=(19.3, 1.0), sol_density=(1.0, 1.0),
Rmoles=(1.0, 0.0), Energy=None, NrDep='True'):
"""
Calculates the complex electron density of core-shell type multilayered particles in el/Angstroms^3
R :: list of Radii and subsequent shell thicknesses in Angstroms of the nanoparticle system
material :: list of material of all the shells starting from the core to outside
relement :: Resonant element
density :: list of density of all the materials in gm/cm^3 starting from the inner core to outside
Rmoles :: mole-fraction of the resonant element in the materials
Energy :: Energy in keV
"""
density = list(density)
if len(material) == len(density):
Nl = len(material)
rho = []
adensity = [] # Density of anomalous element
eirho = [] # Energy independent electron density
r = 0.0
rhor = []
eirhor = []
adensityr = []
for i in range(Nl):
mat = material[i].split(':')
if len(mat) == 2:
solute, solvent = mat
element_adjust = None
if '*' in solute:
m = solute.split('*')[0]
f = self.__cf__.parse(m)
element_adjust = self.__cf__.elements()[-1]
solute_formula = self.__cf__.parse(solute)
if relement in solute_formula.keys():
if element_adjust is not None:
self.__cf__.formula_dict[relement] = 0.0
t1 = self.__cf__.molar_mass()
self.__cf__.formula_dict[element_adjust] = self.__cf__.element_mole_ratio()[
element_adjust] - Rmoles[i]
self.__cf__.formula_dict[relement] = Rmoles[i]
t2 = self.__cf__.molar_mass()
if t1 > 0:
fac = t2 / t1
density[i] = fac * density[i]
solute_elements = self.__cf__.elements()
solute_mw = self.__cf__.molecular_weight()
solute_mv = self.__cf__.molar_volume()
solute_mole_ratio = self.__cf__.element_mole_ratio()
solvent_formula = self.__cf__.parse(solvent)
solvent_elements = self.__cf__.elements()
solvent_mw = self.__cf__.molecular_weight()
solvent_mole_ratio = self.__cf__.element_mole_ratio()
solvent_moles = sol_density[i] * (1 - solute_mv * density[i] / solute_mw) / solvent_mw
solute_moles = density[i] / solute_mw
total_moles = solvent_moles + solute_moles
solvent_mole_fraction = solvent_moles / total_moles
solute_mole_fraction = solute_moles / total_moles
comb_material = ''
for ele in solute_mole_ratio.keys():
comb_material += '%s%.10f' % (ele, solute_mole_ratio[ele] * solute_mole_fraction)
for ele in solvent_mole_ratio.keys():
comb_material += '%s%.10f' % (ele, solvent_mole_ratio[ele] * solvent_mole_fraction)
density[i] = density[i] + sol_density[i] * (1 - solute_mv * density[i] / solute_mw)
# self.output_params['scaler_parameters']['density[%s]' % material[i]]=tdensity
else:
element_adjust = None
if '*' in material[i]:
m = material[i].split('*')[0]
f = self.__cf__.parse(m)
element_adjust = self.__cf__.elements()[-1]
formula = self.__cf__.parse(material[i])
fac = 1.0
if relement in formula.keys():
self.__cf__.formula_dict[relement] = 0.0
t1 = self.__cf__.molar_mass()
if element_adjust is not None:
self.__cf__.formula_dict[element_adjust] = self.__cf__.element_mole_ratio()[
element_adjust] - Rmoles[i]
self.__cf__.formula_dict[relement] = Rmoles[i]
t2 = self.__cf__.molar_mass()
if t1 > 0:
fac = t2 / t1
mole_ratio = self.__cf__.element_mole_ratio()
comb_material = ''
for ele in mole_ratio.keys():
comb_material += '%s%.10f' % (ele, mole_ratio[ele])
density[i] = fac * density[i]
tdensity = density[i]
formula = self.__cf__.parse(comb_material)
molwt = self.__cf__.molecular_weight()
elements = self.__cf__.elements()
mole_ratio = self.__cf__.element_mole_ratio()
# numbers=np.array(chemical_formula.get_element_numbers(material[i]))
moles = [mole_ratio[ele] for ele in elements]
nelectrons = 0.0
felectrons = complex(0.0, 0.0)
aden = 0.0
for j in range(len(elements)):
f0 = self.__cf__.xdb.f0(elements[j], 0.0)[0]
nelectrons = nelectrons + moles[j] * f0
if Energy is not None:
if elements[j] != relement:
if NrDep:
f1 = self.__cf__.xdb.f1_chantler(element=elements[j], energy=Energy * 1e3, smoothing=0)
f2 = self.__cf__.xdb.f2_chantler(element=elements[j], energy=Energy * 1e3, smoothing=0)
felectrons = felectrons + moles[j] * complex(f1, f2)
else:
f1 = self.__cf__.xdb.f1_chantler(element=elements[j], energy=Energy * 1e3, smoothing=0)
f2 = self.__cf__.xdb.f2_chantler(element=elements[j], energy=Energy * 1e3, smoothing=0)
felectrons = felectrons + moles[j] * complex(f1, f2)
if elements[j] == relement:
aden += 0.6023 * moles[j] * tdensity / molwt
adensity.append(
aden) # * np.where(r > Radii[i - 1], 1.0, 0.0) * pl.where(r <= Radii[i], 1.0, 0.0) / molwt
eirho.append(0.6023 * (
nelectrons) * tdensity / molwt) # * np.where(r > Radii[i - 1], 1.0,0.0) * pl.where(r <= Radii[i], 1.0,0.0) / molwt
rho.append(0.6023 * (
nelectrons + felectrons) * tdensity / molwt) # * np.where(r > Radii[i - 1], 1.0,0.0) * pl.where(r <= Radii[i], 1.0, 0.0) / molwt
rhor.append([r, np.real(rho[-1])])
eirhor.append([r, np.real(eirho[-1])])
adensityr.append([r, np.real(adensity[-1])])
r = r + R[i]
rhor.append([r, np.real(rho[-1])])
eirhor.append([r, np.real(eirho[-1])])
adensityr.append([r, np.real(adensity[-1])])
rhor, eirhor, adensityr = np.array(rhor), np.array(eirhor), np.array(adensityr)
rhor[-1, 0] = rhor[-1, 0] + R[-2]
eirhor[-1, 0] = eirhor[-1, 0] + R[-2]
adensityr[-1, 0] = adensityr[-1, 0] + R[-2]
self.output_params['Density'] = {'x': np.cumsum(R), 'y': density,
'names': ['r (Angs)', 'density (gm/cm^3)']}
            return np.array(rho)
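# Illustrative usage sketch (not part of the original module; the argument values below are
# assumptions, while the class and method signatures are as defined above):
# sdl = Sphere_Double_Layer(x=np.linspace(0.001, 0.5, 200), relement='Au', Energy=12.0)
# rho = sdl.calc_rho(R=(50.0, 10.0, 0.0), material=('Au', 'Rb', 'H2O'),
#                    relement='Au', density=(19.32, 1.53, 1.0), sol_density=(1.0, 1.0, 1.0),
#                    Rmoles=(1.0, 0.0, 0.0), Energy=12.0)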
#!/usr/bin/env python
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import patches
import matplotlib as mpl
sys.path.append('..')
from models.car_rear_axle import Car
from controllers.ModelPredictiveController import MPC
from trajectory_generation.CubicSpline import Spline2D
GOAL_EPS = 0.1
# Vehicle parameters
WIDTH = 1.0 # m
WHEEL_LEN = 0.3 # m
WHEEL_WIDTH = 0.2 # m
TREAD = 0.7 # m
L = 3.0 # m
def distance(a, b):
return np.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)
def find_closest_waypoint(car, cx, cy):
distances = np.sum(( np.array([[car.x], [car.y]]) -
np.stack((cx, cy)) )**2, axis=0)
idx = np.argmin(distances)
return idx, cx[idx], cy[idx]
def plot_car(x, y, yaw, delta=0.0, cabcolor="-r", truckcolor="-k"):
x_f = x + np.cos(yaw) * L
y_f = y + np.sin(yaw) * L
plt.plot([x, x_f], [y, y_f], 'k')
rear_axle_x1 = x + WIDTH * np.cos(yaw - 1.57) / 2
rear_axle_y1 = y + WIDTH * np.sin(yaw - 1.57) / 2
rear_axle_x2 = x + WIDTH * np.cos(yaw + 1.57) / 2
rear_axle_y2 = y + WIDTH * np.sin(yaw + 1.57) / 2
plt.plot([rear_axle_x1, rear_axle_x2], [rear_axle_y1, rear_axle_y2], 'k')
front_axle_x1 = x_f + WIDTH * np.cos(yaw - 1.57) / 2
front_axle_y1 = y_f + WIDTH * np.sin(yaw - 1.57) / 2
front_axle_x2 = x_f + WIDTH * np.cos(yaw + 1.57) / 2
front_axle_y2 = y_f + WIDTH * np.sin(yaw + 1.57) / 2
plt.plot([front_axle_x1, front_axle_x2], [front_axle_y1, front_axle_y2], 'k')
right_rear_wheel = np.array([[WHEEL_LEN, -WHEEL_LEN, -WHEEL_LEN, WHEEL_LEN,
WHEEL_LEN],
[-WHEEL_WIDTH - TREAD, -WHEEL_WIDTH - TREAD,
WHEEL_WIDTH - TREAD, WHEEL_WIDTH - TREAD,
-WHEEL_WIDTH - TREAD]])
right_front_wheel = np.copy(right_rear_wheel)
left_rear_wheel = np.copy(right_rear_wheel)
left_rear_wheel[1, :] *= -1
left_front_wheel = np.copy(right_front_wheel)
left_front_wheel[1, :] *= -1
R_yaw = np.array([[np.cos(yaw), np.sin(yaw)],
[-np.sin(yaw), np.cos(yaw)]])
R_delta = np.array([[np.cos(delta), np.sin(delta)],
[-np.sin(delta), np.cos(delta)]])
right_rear_wheel = R_yaw.T @ right_rear_wheel
left_rear_wheel = R_yaw.T @ left_rear_wheel
right_front_wheel = R_delta.T @ right_front_wheel
left_front_wheel = R_delta.T @ left_front_wheel
right_front_wheel[0, :] += L
left_front_wheel[0, :] += L
right_front_wheel = R_yaw.T @ right_front_wheel
left_front_wheel = R_yaw.T @ left_front_wheel
right_rear_wheel[0, :] += x
right_rear_wheel[1, :] += y
left_rear_wheel[0, :] += x
left_rear_wheel[1, :] += y
right_front_wheel[0, :] += x
right_front_wheel[1, :] += y
left_front_wheel[0, :] += x
left_front_wheel[1, :] += y
plt.plot(np.array(right_rear_wheel[0, :]).flatten(),
np.array(right_rear_wheel[1, :]).flatten(), truckcolor)
plt.plot(np.array(left_rear_wheel[0, :]).flatten(),
np.array(left_rear_wheel[1, :]).flatten(), truckcolor)
plt.plot(np.array(right_front_wheel[0, :]).flatten(),
             np.array(right_front_wheel[1, :]).flatten(), truckcolor)
    plt.plot(np.array(left_front_wheel[0, :]).flatten(),
             np.array(left_front_wheel[1, :]).flatten(), truckcolor)
# coding: utf-8
# Creates:
# * nebular_spectra_OI.pdf
# In[4]:
import os
import copy
from astropy.io import ascii as asc
from astropy.io import fits
from astropy.time import Time
import astropy.units as u
import astropy.constants as c
import numpy as np
from matplotlib import pyplot as plt
#get_ipython().run_line_magic('matplotlib', '')
import matplotlib as mpl
from utilities_az import spectroscopy as spec, supernova, define_filters
# In[5]:
plt.style.use(['seaborn-paper', 'az-paper-onecol'])
# Confirmed that scaled spectra files have not been de-redshifted
# In[6]:
FIG_DIR = '.'
neb_repo = '/Users/bostroem/Desktop/research/asassn15oz/data/spectra/EFOSC/scaled'
GEMINI_DIR = '/Users/bostroem/Desktop/research/asassn15oz/data/spectra/gmos/'
model_repo = '../../../nebular_spectra_OI/models/'
# ## Read in spectra and de-redshift
# In[7]:
specfile_212 = os.path.join(neb_repo, 'tASASSN_15oz_20160410_Gr13_Free_slit1.5_57723_1_esca.asci')
tbdata_212 = asc.read(specfile_212, names = ['wavelength', 'flux'])
wl_212 = spec.apply_redshift(tbdata_212['wavelength'], redshift=0.0069)
spectrum_212 = tbdata_212['flux']
# In[8]:
specfile_340 = os.path.join(neb_repo, 'tASASSN_15oz_20160802_Gr13_Free_slit1.0_57723_1_esca.asci')
#specfile = os.path.join(neb_repo, 'asassn15oz_20160919a_387dayssca.fits')#low S/N
tbdata_340 = asc.read(specfile_340, names = ['wavelength', 'flux'])
wl_340 = spec.apply_redshift(tbdata_340['wavelength'], redshift=0.0069)
spectrum_340 = tbdata_340['flux']
# In[9]:
specfile_306 = os.path.join(GEMINI_DIR, 'gmos_merge_rest_dustcorrsca.dat')
#specfile_306 = os.path.join(GEMINI_DIR, 'gmos_merge_rest_dustcorr.dat')
tbdata_306 = asc.read(specfile_306, names=['wave', 'flux'])
wl_306 = tbdata_306['wave']
spectrum_306 = tbdata_306['flux']
# ## Read in Models
# In[10]:
modelfile12_212 = os.path.join(model_repo, 'mzams12_212d.dat')
modelfile15_212 = os.path.join(model_repo, 'mzams15_212d.dat')
modelfile19_212 = os.path.join(model_repo, 'mzams19_212d.dat')
modelfile25_212 = os.path.join(model_repo, 'mzams25_212d.dat')
# In[11]:
modelfile12_340 = os.path.join(model_repo, 'mzams12_306d.dat')
modelfile15_340 = os.path.join(model_repo, 'mzams15_350d.dat')
modelfile19_340 = os.path.join(model_repo, 'mzams19_369d.dat')
modelfile25_340 = os.path.join(model_repo, 'mzams25_369d.dat')
# In[12]:
modelfile12_306 = os.path.join(model_repo, 'mzams12_306d.dat')
modelfile15_306 = os.path.join(model_repo, 'mzams15_306d.dat')
modelfile19_306 = os.path.join(model_repo, 'mzams19_250d.dat')
modelfile25_306 = os.path.join(model_repo, 'mzams25_306d.dat')
# In[13]:
mod12_212 = asc.read(modelfile12_212, names = ['wavelength', 'flux'])
mod15_212 = asc.read(modelfile15_212, names = ['wavelength', 'flux'])
mod19_212 = asc.read(modelfile19_212, names = ['wavelength', 'flux'])
mod25_212 = asc.read(modelfile25_212, names = ['wavelength', 'flux'])
# In[14]:
mod12_340 = asc.read(modelfile12_340, names = ['wavelength', 'flux'])
mod15_340 = asc.read(modelfile15_340, names = ['wavelength', 'flux'])
mod19_340 = asc.read(modelfile19_340, names = ['wavelength', 'flux'])
mod25_340 = asc.read(modelfile25_340, names = ['wavelength', 'flux'])
# In[15]:
mod12_306 = asc.read(modelfile12_306, names = ['wavelength', 'flux'])
mod15_306 = asc.read(modelfile15_306, names = ['wavelength', 'flux'])
mod19_306 = asc.read(modelfile19_306, names = ['wavelength', 'flux'])
mod25_306 = asc.read(modelfile25_306, names = ['wavelength', 'flux'])
# ## Create Spectrum Objects
# In[16]:
mod_spec12_212 = spec.spectrum1d(mod12_212['wavelength'], mod12_212['flux'])
mod_spec15_212 = spec.spectrum1d(mod15_212['wavelength'], mod15_212['flux'])
mod_spec19_212 = spec.spectrum1d(mod19_212['wavelength'], mod19_212['flux'])
mod_spec25_212 = spec.spectrum1d(mod25_212['wavelength'], mod25_212['flux'])
data_spec_212 = spec.spectrum1d(wl_212-10, spectrum_212)
# In[17]:
mod_spec12_340 = spec.spectrum1d(mod12_340['wavelength'], mod12_340['flux'])
mod_spec15_340 = spec.spectrum1d(mod15_340['wavelength'], mod15_340['flux'])
mod_spec19_340 = spec.spectrum1d(mod19_340['wavelength'], mod19_340['flux'])
mod_spec25_340 = spec.spectrum1d(mod25_340['wavelength'], mod25_340['flux'])
data_spec_340 = spec.spectrum1d(wl_340-20, spectrum_340)
# In[18]:
mod_spec12_306 = spec.spectrum1d(mod12_306['wavelength'], mod12_306['flux'])
mod_spec15_306 = spec.spectrum1d(mod15_306['wavelength'], mod15_306['flux'])
mod_spec19_306 = spec.spectrum1d(mod19_306['wavelength'], mod19_306['flux'])
mod_spec25_306 = spec.spectrum1d(mod25_306['wavelength'], mod25_306['flux'])
data_spec_306 = spec.spectrum1d(wl_306-20, spectrum_306)
# # Scale Models to spectrum
# In[19]:
Ni_mass_mod = 0.062 #Msun
d_mod = 5.5 #Mpc
d_15oz = 28.83 #Mpc, NED Hubble + Virgo Infall
Co_halflife = 111.4
t_obs_306 = 288.0
t_mod_12_306 = 306.0
t_mod_15_306 = 306.0
t_mod_19_306 = 250.0
t_mod_25_306 = 306.0
t_obs_340 = 340.0
t_mod_12_340 = 306.0
t_mod_15_340 = 350.0
t_mod_19_340 = 369.0
t_mod_25_340 = 369.0
t_obs_212 = 228.0
t_mod_12_212 = 212.0
t_mod_15_212 = 212.0
t_mod_19_212 = 212.0
t_mod_25_212 = 212.0
# ##### Scale by time difference
# In[20]:
#Create new object
scale_time_mod_spec12_212 = copy.deepcopy(mod_spec12_212)
scale_time_mod_spec15_212 = copy.deepcopy(mod_spec15_212)
scale_time_mod_spec19_212 = copy.deepcopy(mod_spec19_212)
scale_time_mod_spec25_212 = copy.deepcopy(mod_spec25_212)
scale_time_mod_spec12_212.flux = scale_time_mod_spec12_212.flux*np.exp((t_mod_12_212-t_obs_212)/Co_halflife)
scale_time_mod_spec15_212.flux = scale_time_mod_spec15_212.flux*np.exp((t_mod_15_212-t_obs_212)/Co_halflife)
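# Note: each model spectrum is rescaled from its model epoch t_mod to the observed epoch
# t_obs assuming the nebular flux tracks the radioactive Co decay, i.e.
# flux_obs ~ flux_mod * exp((t_mod - t_obs) / Co_halflife), as in the statements above.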
import os
import numpy as np
from utils.constants import TZ_COND_DICT
from analysis import compute_stats, remove_none
from scipy.stats import pearsonr, sem, ttest_ind
from scipy import stats
from collections import defaultdict
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='white', palette='colorblind', context='poster')
from itertools import product
from models import LCALSTM as Agent
from task import SequenceLearning
from exp_ms import run_ms
from utils.params import P
from analysis import compute_cell_memory_similarity, process_cache, \
process_cache_ms, get_trial_cond_ids, get_roll_av, create_sim_dict, \
compute_cell_memory_similarity_stats, n_epoch_inpt_calc, get_trial_cond_ids
from utils.io import build_log_path, load_ckpt, pickle_save_dict, \
get_test_data_dir, get_test_data_fname, load_env_metadata, pickle_load_dict
log_root = '/Users/carsonwardell/Desktop/Thesis/log/' #'/tigress/cwardell/logs/learn-hippocampus/log/'
exp_name = 'Mental-Sims-v_old_5-actor_f-olda2c'
#exp_name = 'Mental-Sims-v_old_3.3-olda2c-frozenactor'
def_prob = None
n_def_tps = 0
n_examples = 128
seed = 0
supervised_epoch = 600
epoch_load = 1000
n_epochs = 350
n_branch = 4
n_param = 16
enc_size = 16
# enc_size_test = 8
enc_size_test = enc_size
n_event_remember = 2
penalty_random = 1
# testing param, ortho to the training directory
attach_cond = 0
# loading params
pad_len_load = 0
p_rm_ob_enc_load =0 #.3
p_rm_ob_rcl_load =0 #.3
# testing params
pad_len = 0
p_test = 0
p_rm_ob_enc_test = p_test
p_rm_ob_rcl_test = p_test
n_examples_test = 256
similarity_max_test = .9
similarity_min_test = 0
lr=8e-4
# load lca params
comp_val = .8
leak_val = 0
'''loop over conditions for testing'''
slience_recall_time = None
penalty_train = 5
penalty_test = np.array([2])
seed_num = 2
# get cond ids (same for all trials)
log_cond = 'DM'
cond_ids = get_trial_cond_ids(log_cond)
cond_ids['DM'] = True
memory_types = ['targ', 'lure']
gr_pal = sns.color_palette('colorblind')[2:4]
scramble_option = False
penalty = 5
seed_num = 4
T_total = 32
T_part = int(T_total/2)
p = P(
exp_name=exp_name, sup_epoch=supervised_epoch,
n_param=n_param, n_branch=n_branch, pad_len=pad_len_load,
def_prob=def_prob, n_def_tps=n_def_tps,
enc_size=enc_size, n_event_remember=n_event_remember,
penalty=penalty_train, penalty_random=penalty_random,
attach_cond=attach_cond, lr=lr,
p_rm_ob_enc=p_rm_ob_enc_load, p_rm_ob_rcl=p_rm_ob_rcl_load,
)
task = SequenceLearning(
n_param=p.env.n_param, n_branch=p.env.n_branch, pad_len=pad_len,
p_rm_ob_enc=p_rm_ob_enc_test, p_rm_ob_rcl=p_rm_ob_rcl_test,
similarity_max=similarity_max_test, similarity_min=similarity_min_test,
similarity_cap_lag=p.n_event_remember,
)
x_dim = task.x_dim
if attach_cond != 0:
x_dim += 1
# load the agent back
agent = Agent(
input_dim=x_dim, output_dim=p.a_dim,
rnn_hidden_dim=p.net.n_hidden, dec_hidden_dim=p.net.n_hidden_dec,
dict_len=p.net.dict_len
)
''' data logs'''
Log_caches = []
Log_full_caches = []
av_sims_data = []
origins_data = []
Log_sem_caches = []
all_sims_lengs = []
all_epoch_reward = []
Log_sim_cos = [None] * 40
Log_sim_lca = [None] * 40
'''pull data from all subjects'''
for idx, subj_id in enumerate(range(0,11)):
# create logging dirs
log_path, log_subpath = build_log_path(
subj_id, p, log_root=log_root, mkdir=False, verbose=False
)
# init env
log_subpath
env_data = load_env_metadata(log_subpath)
def_path = env_data['def_path']
p.env.def_path = def_path
p.update_enc_size(enc_size_test)
test_params = [penalty, pad_len, slience_recall_time]
test_data_dir, _ = get_test_data_dir(
log_subpath, epoch_load, test_params)
test_data_fname = get_test_data_fname(
n_examples, None, False)
fpath = os.path.join(test_data_dir, test_data_fname)
print(fpath)
print(subj_id)
dict = pickle_load_dict(fpath)
Log_full_caches.append(dict['Log_caches'][2])
Log_sem_caches.append(dict['Log_caches'][1])
Log_caches.append(dict['Log_caches'][0])
av_sims_data.append(dict['av_sims_data'])
origins_data.append(dict['origins_data'])
all_sims_lengs.append(dict['full_sim_data'][0])
all_epoch_reward.append(dict['full_sim_data'][1])
#'full_sim_data': [all_sims_lengs, all_epoch_reward]}
''' lets do learning curves first '''
# first average the data across subjs and get SEM
sa_sim_lengths = np.mean(av_sims_data, axis=0) #sa means averaged across subjs
sm_sim_lengths = sem(av_sims_data, axis=0) #sm means sem across subjs
# now process rolling averages
w = 3 # set a window
r_sims_lengs, r_epoch_reward = get_roll_av(sa_sim_lengths[0], sa_sim_lengths[1], w)
r_sims_lengs = np.asarray(r_sims_lengs)
r_epoch_reward = np.asarray(r_epoch_reward)
#fill in initial vals
r_sims_lengs[0:w] = np.reshape(sa_sim_lengths[0][0:w], (-1,1))
r_epoch_reward[0:w] = np.reshape(sa_sim_lengths[1][0:w], (-1,1))
# process for histogram
# now we plot
sk = 3 # skip num
f, ax = plt.subplots(1,1,figsize=(12, 6)) #, sharex=True)
ax.errorbar(
x=np.arange(n_epochs)[::sk],
y=r_sims_lengs[::sk],
yerr=sm_sim_lengths[0][::sk],
label = 'average simulation length',
alpha=.4,
)
ax.set_ylabel('simulation length')
ax.axhline(1, color='grey', linestyle='--')
ax.set_xlabel('epoch')
ax2 = ax.twinx()
ax2.errorbar(
x=np.arange(n_epochs)[::sk],
y=r_epoch_reward[::sk],
yerr=sm_sim_lengths[0][::sk],
label = 'average cumulative reward',
color = 'r',
alpha=.4
)
ax2.set_ylabel("average reward")
ax2.legend(loc='best', bbox_to_anchor=(1, 0.5))
ax.legend(loc='best', bbox_to_anchor=(1, 0.37))
f.show()
# plot again but next to eachother
# now lets do two plots
sk = 1
fig = plt.figure(figsize=(12,8))
AX = plt.GridSpec(2, 3)
AX.update(wspace = 1, hspace = 1)
ax1 = plt.subplot(AX[0,:2])
ax2 = plt.subplot(AX[1,:2])
ax3 = plt.subplot(AX[1,2:])
ax2.plot(np.arange(n_epochs)[::sk],
r_epoch_reward[::sk],
#yerr=sm_sim_lengths[0][::sk],
#label = 'average cumulative reward',
color = 'gray',
alpha=1,
linewidth=1
)
# get upper and lower bounds
upper = np.add(r_epoch_reward.flatten(), sm_sim_lengths[1].flatten())
lower = np.subtract(r_epoch_reward.flatten(),sm_sim_lengths[1].flatten())
ax2.fill_between(np.arange(n_epochs),
upper,
lower,
color = 'gray',
alpha=.4,
linewidth=.1
)
ax1.plot(np.arange(n_epochs)[::sk],
r_sims_lengs[::sk],
#yerr=sm_sim_lengths[0][::sk],
#label = 'average simulation length',
color = 'g',
alpha=1,
linewidth=1
)
# get upper and lower bounds
upper = np.add(r_sims_lengs.flatten(), sm_sim_lengths[0].flatten())
lower = np.subtract(r_sims_lengs.flatten(),sm_sim_lengths[0].flatten())
ax1.fill_between(np.arange(n_epochs),
upper,
lower,
color = 'g',
alpha=.4,
linewidth=.1
)
ax3.hist(all_sims_lengs[1][0,:], bins=16, label='first epoch',
alpha=.6, density=True, color = gr_pal[-1])
ax3.title.set_text("")
ax3.hist(all_sims_lengs[1][-1,:], bins=16, label="last epoch",
alpha=.6, density=True, color = gr_pal[3])
#ax3.legend(prop={'size': 20})
sns.despine()
fs = 19
ax2.set_ylabel("avg. cumulative reward", fontsize = fs)
ax2.set_xlabel("epochs", fontsize = fs)
ax1.set_ylabel("avg. simulation length", fontsize = fs)
ax1.set_xlabel("epochs", fontsize = fs)
ax1.axhline(1, color='grey', linestyle='--', alpha=.5)
ax3.set_xlabel("simulation length", fontsize = fs)
ax3.set_ylabel("proportion", fontsize = fs)
ax3.set_xticks([0,15])
ax3.set_yticks([0,.5,1])
#ax1.legend(loc='best')
#ax2.legend(loc='best')
sns.despine()
fig.tight_layout()
fig.show()
'''now plot input'''
np.shape(Log_full_caches)
# np.shape(sem_c)  # sem_c is not defined at this point, so this line is left commented out
Log_caches_ = np.mean(Log_caches, axis=0)
inpt_sem = sem(Log_caches, axis=0)
Log_sem_caches_ = np.mean(Log_sem_caches, axis=0)
n_e = 1
mu_first, er_first, mu_last, er_last = n_epoch_inpt_calc(Log_caches_,
Log_sem_caches_,
n_e, axis=0)
print("mu shape:", np.shape(mu_first.flatten()))
print("err shape:", np.shape(er_first.flatten()))
f, ax = plt.subplots(1, 1, figsize=(5, 10))
ax.errorbar(
x=np.arange(n_param), y=mu_first, yerr=inpt_sem[0], label="first %d epochs" % (n_e))
ax.errorbar(
x=np.arange(n_param), y=mu_last, yerr=inpt_sem[-1], label="last %d epochs" % (n_e))
ax.legend()
#ax[0].set_ylim([-.05, .7])
ax.set_ylabel('input gate value')
ax.set_xlabel('Time')
ax.set_xticks(np.arange(0, p.env.n_param, p.env.n_param - 1))
'''Now plot sim origins '''
np.shape(origins_data)
av_origins = np.mean(origins_data, axis=0)
sem_origins = sem(origins_data, axis=0)
labels = ['target','lure',
'target/lure overlap', 'novel']
f, ax = plt.subplots(figsize=(15, 10)) #, sharex=True)
for orig in range(np.shape(av_origins)[0]):
upper = np.add(av_origins[orig].flatten(), sem_origins[orig].flatten())
lower = np.subtract(av_origins[orig].flatten(), sem_origins[orig].flatten())
ax.fill_between(np.arange(n_epochs),
upper,
lower,
alpha=.4,
linewidth=.1
)
ax.plot(np.arange(n_epochs),
av_origins[orig],
#yerr=sm_sim_lengths[0][::sk],
#label = 'average simulation length',
alpha=1,
linewidth=1,
label = labels[orig]
)
ax.set_ylabel('% of total instances per epoch')
ax.set_xlabel('epoch')
sns.despine()
ax.legend(loc = 'center left', title = 'feature origins:',
title_fontsize = 'large')
f.tight_layout()
f.show()
'''plot sim origin and input together'''
fig = plt.figure(figsize=(17,8))
AX = plt.GridSpec(1, 5)
AX.update(wspace = 2, hspace = 0.5)
ax1 = plt.subplot(AX[0,:3])
ax2 = plt.subplot(AX[0,3:])
# first origins
# colors per feature origin, defined here so the keyword below resolves
# (mirrors the orig_colors list used further down in this script)
orig_colors = [gr_pal[0], gr_pal[1], gr_pal[2], gr_pal[5]]
for orig in range(np.shape(av_origins)[0]):
    upper = np.add(av_origins[orig].flatten(), sem_origins[orig].flatten())
    lower = np.subtract(av_origins[orig].flatten(), sem_origins[orig].flatten())
    ax1.fill_between(np.arange(n_epochs),
                     upper,
                     lower,
                     alpha=.4,
                     linewidth=.1,
                     color=orig_colors[orig]
                     )
    ax1.plot(np.arange(n_epochs),
             av_origins[orig],
             #yerr=sm_sim_lengths[0][::sk],
             #label = 'average simulation length',
             alpha=1,
             linewidth=1,
             label=labels[orig],
             color=orig_colors[orig]
             )
ax1.set_ylabel('proportion of outputted features')
ax1.set_xlabel('epoch')
ax1.set_yticks([.1,.3,.5,.7])
sns.despine()
#ax1.legend(loc = 'center left', title = 'feature origins:',
# prop={'size': 20}, ncol=2, title_fontsize = 'small')
#f.tight_layout()
#f.show()
#now inpt
Log_caches_ = np.mean(Log_caches, axis=0)
# get SEM
inpt_sem = sem(Log_caches, axis=0)
np.shape(inpt_sem)
n_e = 1
#mu_first, er_first, mu_last, er_last = n_epoch_inpt_calc(Log_caches_,
# Log_sem_caches_,)
labels = ['first epoch','last epoch']
ep_colors = [gr_pal[-1],gr_pal[3]]
conds = [0,-1]
for idx, cond in enumerate(conds):
upper = np.add(Log_caches_[cond].flatten(), inpt_sem[cond].flatten())
lower = np.subtract(Log_caches_[cond].flatten(), inpt_sem[cond].flatten())
ax2.fill_between(np.arange(T_part),
upper,
lower,
alpha=.3,
linewidth=.1,
color = ep_colors[idx]
)
ax2.plot(np.arange(T_part),
Log_caches_[cond],
#yerr=sm_sim_lengths[0][::sk],
#label = 'average simulation length',
alpha=1,
linewidth=2,
label = labels[idx],
color = ep_colors[idx]
)
ax2.set_ylabel('input gate value')
ax2.set_xlabel('time')
#print(ax2.get_xticks())
ax2.set_xticks([0,15])
ax2.set_yticks([0,.3,.6])
ax2.axvline(2, color='grey', linestyle='--', alpha=.5)
ax1.axhline(.75, color='grey', linestyle='--', alpha=.5)
sns.despine()
#ax2.legend(prop={'size': 20})#loc = 'center left', title = 'feature origins:',
#title_fontsize = 'large')
'''plot ratio'''
log_ratios = []
for i in range(0,11):
sub_data = origins_data[i]
log_ratios.append(sub_data[0]/sub_data[1])
av_ratio = np.mean(log_ratios, axis=0)
sem_ratio = sem(log_ratios, axis=0)
f_l_ratios = [av_ratio[0],av_ratio[-1]]
f_l_err = [sem_ratio[0],sem_ratio[-1]]
# calculate significance between first and last
p_val = ttest_ind(np.asarray(log_ratios)[:,0],np.asarray(log_ratios)[:,-1])
print(p_val)
np.shape(np.asarray(log_ratios)[:,0])
labels = ['first epoch','last epoch']
f, ax = plt.subplots(1,1,figsize=(6, 6)) #, sharex=True)
ax.bar(
x=(np.arange(2)),
height=f_l_ratios,
yerr=f_l_err,
color = ep_colors)
ax.set_xticks(np.arange(len(labels)))
ax.set_ylabel('target / lure ratio')
ax.set_xticklabels(labels, fontsize=20)
ax.set_yticks([0,.5,1,1.5])
ax.axhline(1, color='grey', linestyle='--', alpha=.5)
sns.despine()
'''single origin example and novel/mem1 ratio'''
labels = ['target','lure',
'target/lure overlap', 'novel']
orig_colors = [gr_pal[0],gr_pal[1], gr_pal[2], gr_pal[5]]
plot_data = origins_data[1]
f, ax = plt.subplots(1,1, figsize=(6,6))
for orig in range(np.shape(av_origins)[0]):
upper = np.add(plot_data[orig].flatten(), sem_origins[orig].flatten())
lower = np.subtract(plot_data[orig].flatten(), sem_origins[orig].flatten())
ax.fill_between(np.arange(n_epochs),
upper,
lower,
alpha=.4,
linewidth=.1,
color = orig_colors[orig]
)
ax.plot(np.arange(n_epochs),
plot_data[orig],
#yerr=sm_sim_lengths[0][::sk],
#label = 'average simulation length',
alpha=1,
linewidth=1,
label = labels[orig],
color = orig_colors[orig]
)
ax.set_ylabel('proportion of outputted features')
ax.set_xlabel('epoch')
ax.set_yticks([.1,.3,.5,.7])
sns.despine()
# process data
log_ratios_nt = [] # novel/target log
for i in range(0,11):
sub_data = origins_data[i]
log_ratios_nt.append(sub_data[0]/sub_data[-1])
av_ratio_nt = np.mean(log_ratios_nt, axis=0)
sem_ratio_nt = sem(log_ratios_nt, axis=0)
f_l_ratios_nt = [av_ratio_nt[0],av_ratio_nt[-1]]
f_l_err_nt = [sem_ratio_nt[0],sem_ratio_nt[-1]]
p_val = ttest_ind(np.asarray(log_ratios)[:,0],np.asarray(log_ratios)[:,-1])
print(p_val)
f, ax = plt.subplots(1,1, figsize=(6,6))
labels = ['first epoch','last epoch']
ax.bar(
x=(np.arange(2)),
height=f_l_ratios_nt,
yerr=f_l_err_nt,
color = ep_colors)
''' final plot '''
# first, reprocess data for sim lengths
log_ratios = []
for i in range(0,11):
sub_data = origins_data[i]
log_ratios.append(sub_data[0]/sub_data[1])
av_ratio = np.mean(log_ratios, axis=0)
sem_ratio = sem(log_ratios, axis=0)
f_l_ratios = [av_ratio[0],av_ratio[-1]]
f_l_err = [sem_ratio[0],sem_ratio[-1]]
# now process for inpt
Log_caches_ = np.mean(Log_caches, axis=0)
inpt_sem = sem(Log_caches, axis=0)
np.shape(inpt_sem)
# now process for sim lengths
sa_sim_lengths = np.mean(av_sims_data, axis=0) #sa means averaged across subjs
sm_sim_lengths = sem(av_sims_data, axis=0) #sm means sem across subjs
# now process rolling averages
w = 3 # set a window
r_sims_lengs, r_epoch_reward = get_roll_av(sa_sim_lengths[0], sa_sim_lengths[1], w)
r_sims_lengs = np.asarray(r_sims_lengs)
r_epoch_reward = np.asarray(r_epoch_reward)
#fill in initial vals
r_sims_lengs[0:w] = np.reshape(sa_sim_lengths[0][0:w], (-1,1))
r_epoch_reward[0:w] = np.reshape(sa_sim_lengths[1][0:w], (-1,1))
# make graph
fig = plt.figure(figsize=(15,25))
AX = plt.GridSpec(6, 6)
AX.update(wspace = 2.5, hspace = 1.5)
ax1 = plt.subplot(AX[:2,:4])
ax2 = plt.subplot(AX[2:4,:4])
ax3 = plt.subplot(AX[4:,:4])
ax4 = plt.subplot(AX[:2,4:])
ax5 = plt.subplot(AX[2:4,4:])
ax6 = plt.subplot(AX[4:5,4:])
ax7 = plt.subplot(AX[5:,4:])
ax1.plot(np.arange(n_epochs)[::sk],
r_sims_lengs[::sk],
#yerr=sm_sim_lengths[0][::sk],
#label = 'average simulation length',
color = 'g',
alpha=1,
linewidth=1
)
# get upper and lower bounds
upper = np.add(r_sims_lengs.flatten(), sm_sim_lengths[0].flatten())
lower = np.subtract(r_sims_lengs.flatten(),sm_sim_lengths[0].flatten())
ax1.fill_between(np.arange(n_epochs),
upper,
lower,
color = 'g',
alpha=.4,
linewidth=.1
)
ax2.plot(np.arange(n_epochs)[::sk],
r_epoch_reward[::sk],
#yerr=sm_sim_lengths[0][::sk],
#label = 'average cumulative reward',
color = 'gray',
alpha=1,
linewidth=1
)
# get upper and lower bounds
upper = np.add(r_epoch_reward.flatten(), sm_sim_lengths[1].flatten())
lower = np.subtract(r_epoch_reward.flatten(),sm_sim_lengths[1].flatten())
ax2.fill_between(np.arange(n_epochs),
upper,
lower,
color = 'gray',
alpha=.4,
linewidth=.1
)
# histogram
ax4.hist(all_sims_lengs[1][0,:], bins=16, label='first epoch',
alpha=.6, density=True, color = gr_pal[-1])
ax4.title.set_text("")
ax4.hist(all_sims_lengs[1][-1,:], bins=16, label="last epoch",
alpha=.6, density=True, color = gr_pal[3])
#ax3.legend(prop={'size': 20})
# input gate
labels = ['first epoch','last epoch']
ep_colors = [gr_pal[-1],gr_pal[3]]
conds = [0,-1]
for idx, cond in enumerate(conds):
upper = np.add(Log_caches_[cond].flatten(), inpt_sem[cond].flatten())
lower = np.subtract(Log_caches_[cond].flatten(), inpt_sem[cond].flatten())
ax5.fill_between(np.arange(T_part),
upper,
lower,
alpha=.3,
linewidth=.1,
color = ep_colors[idx]
)
    # mirrors the ax2.plot call for the same data earlier in this script
    ax5.plot(np.arange(T_part),
             Log_caches_[cond],
             alpha=1,
             linewidth=2,
             label=labels[idx],
             color=ep_colors[idx]
             )
"""
Created on July 2020.
@author: <NAME> <<EMAIL>> https://github.com/aminheydarshahi/
"""
import numpy as np
import cv2
from util import *
class OpticalFlowLK:
def __init__(self, winsize, epsilon, iterations):
self.winsize = winsize
self.epsilon = epsilon
self.iterations = iterations
def compute(self, prevImg, nextImg, prevPts):
assert prevImg.size != 0 and nextImg.size != 0, "check prevImg and nextImg"
assert prevImg.shape[0] == nextImg.shape[0], "size mismatch, rows."
assert prevImg.shape[1] == nextImg.shape[1], "size mismatch, cols."
N = prevPts.shape[0]
status = np.ones(N, dtype=int)
        nextPts = np.copy(prevPts)
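        # Sketch of what a standard Lucas-Kanade refinement would do next (an assumption,
        # stated for orientation rather than taken from the original snippet): iterate each
        # point, accumulate spatial gradients over the winsize x winsize window into a 2x2
        # structure tensor, solve for the flow increment, and stop after `iterations` steps
        # or once the increment falls below `epsilon`.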
# -*- coding:utf-8 -*-
__author__ = 'shichao'
import socket
import threading
import struct
import time
import cv2
import numpy
class Carame_Accept_Object:
def __init__(self, S_addr_port=("", 8880)):
self.resolution = (640, 480) # 分辨率
self.img_fps = 15 # 每秒传输多少帧数
self.addr_port = S_addr_port
self.Set_Socket(self.addr_port)
# 设置套接字
def Set_Socket(self, S_addr_port):
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # allow the port to be reused
self.server.bind(S_addr_port)
self.server.listen(5)
# print("the process work in the port:%d" % S_addr_port[1])
def check_option(object, client):
    # decode the control packet to determine the frame rate and resolution
info = struct.unpack('lhh', client.recv(12))
if info[0] > 888:
        object.img_fps = int(info[0]) - 888  # get the frame rate
object.resolution = list(object.resolution)
        # get the resolution
object.resolution[0] = info[1]
object.resolution[1] = info[2]
object.resolution = tuple(object.resolution)
return 1
else:
return 0
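# Hedged example of the matching client-side handshake: check_option above
# unpacks a 12-byte 'lhh' control packet (frame rate offset by 888, then width
# and height), so a client would be expected to send something like the packet
# built by this hypothetical helper.
def build_option_packet(fps=15, width=640, height=480):
    # 888 offset marks the first field as a valid frame-rate request (see check_option)
    return struct.pack('lhh', 888 + fps, width, height)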
def RT_Image(object, client, D_addr):
if (check_option(object, client) == 0):
return
    camera = cv2.VideoCapture('/Users/shichao/Downloads/video/3A1.mp4')  # open the video source (a local file here)
    img_param = [int(cv2.IMWRITE_JPEG_QUALITY), object.img_fps]  # JPEG encoding parameters (img_fps is reused as the quality value)
    while True:
        time.sleep(0.1)  # delay the thread by 0.1 s
        _, object.img = camera.read()  # read one frame of the video
        object.img = cv2.resize(object.img, object.resolution)  # resize to the requested size (resolution must be a tuple)
        _, img_encode = cv2.imencode('.jpg', object.img, img_param)  # encode the frame as a JPEG image
img_code =
|
numpy.array(img_encode)
|
numpy.array
|
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import argparse
import os.path as osp
import numpy as np
from tensorboardX import SummaryWriter
# from torch.utils.tensorboard import SummaryWriter
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
from reid import data_manager
from reid.dataset_loader import ImageDataset, VideoDataset, VideoDataset_time
from reid import transforms as T
from reid import models
from reid.losses import CrossEntropyLabelSmooth, TripletLoss, DeepSupervision
from reid.utils.iotools import save_checkpoint, check_isfile
from reid.utils.avgmeter import AverageMeter
from reid.utils.logger import Logger
from reid.utils.torchtools import count_num_param
from reid.eval_metrics import evaluate
from reid.samplers import RandomIdentitySampler, ClassIdentitySampler
from reid.optimizers import init_optim
parser = argparse.ArgumentParser(description='Train video model with cross entropy loss')
# Datasets
parser.add_argument('--root', type=str, default='/gdata1/xieqk/reid-dataset',
help="root path to data directory")
parser.add_argument('-d', '--dataset', type=str, default='dukemtmcreid-tracklet',
choices=data_manager.get_names())
parser.add_argument('-j', '--workers', default=8, type=int,
help="number of data loading workers (default: 8)")
parser.add_argument('--height', type=int, default=256,
help="height of an image (default: 256)")
parser.add_argument('--width', type=int, default=128,
help="width of an image (default: 128)")
parser.add_argument('--seq-len', type=int, default=15,
help="number of images to sample in a tracklet")
# Optimization options
parser.add_argument('--optim', type=str, default='adam',
help="optimization algorithm (see optimizers.py)")
parser.add_argument('--max-epoch', default=50, type=int,
help="maximum epochs to run")
parser.add_argument('--start-epoch', default=0, type=int,
help="manual epoch number (useful on restarts)")
parser.add_argument('--train-batch', default=256, type=int,
help="train batch size")
parser.add_argument('--test-batch', default=128, type=int,
help="test batch size (number of tracklets)")
parser.add_argument('--lr', '--learning-rate', default=0.0003, type=float,
help="initial learning rate")
parser.add_argument('--stepsize', default=[10, 20, 30, 40], nargs='+', type=int,
help="stepsize to decay learning rate")
parser.add_argument('--gamma', default=0.1, type=float,
help="learning rate decay")
parser.add_argument('--weight-decay', default=5e-04, type=float,
help="weight decay (default: 5e-04)")
parser.add_argument('--margin', type=float, default=0.3,
help="margin for triplet loss")
parser.add_argument('--num-instances', type=int, default=4,
help="number of instances per identity")
# Architecture
parser.add_argument('-a', '--arch', type=str, default='resnet50', choices=models.get_names())
parser.add_argument('--pool', type=str, default='avg', choices=['avg', 'max'])
# Miscs
parser.add_argument('--print-freq', type=int, default=10,
help="print frequency")
parser.add_argument('--seed', type=int, default=1,
help="manual seed")
parser.add_argument('--eval-step', type=int, default=10,
help="run evaluation for every N epochs (set to -1 to test after training)")
parser.add_argument('--start-eval', type=int, default=0,
help="start to evaluate after specific epoch")
parser.add_argument('--save-dir', type=str, default='')
parser.add_argument('--use-cpu', action='store_true',
help="use cpu")
parser.add_argument('--gpu-devices', '-g', default='0,1', type=str,
help='gpu device ids for CUDA_VISIBLE_DEVICES')
parser.add_argument('--use-avai-gpus', action='store_true',
help="use available gpus instead of specified devices (this is useful when using managed clusters)")
# global variables
args = parser.parse_args()
best_rank1 = -np.inf
def main():
global args, best_rank1
torch.set_num_threads(args.workers)
torch.manual_seed(args.seed)
if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu: use_gpu = False
if args.save_dir:
save_dir = args.save_dir
else:
save_dir = osp.join('logs', 'dukemtmcreid_s1')
writer = SummaryWriter(log_dir=save_dir)
sys.stdout = Logger(osp.join(save_dir, 'log_train.txt'))
print("==========\nArgs:{}\n==========".format(args))
if use_gpu:
print("Currently using GPU {}".format(args.gpu_devices))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print("Currently using CPU (GPU is highly recommended)")
print("Initializing dataset {}".format(args.dataset))
dataset = data_manager.init_imgreid_dataset(root=args.root, name=args.dataset)
transform_train = T.Compose([
T.Random2DTranslation(args.height, args.width),
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
transform_test = T.Compose([
T.Resize((args.height, args.width)),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
pin_memory = True if use_gpu else False
# decompose tracklets into images for image-based training
train_data = dataset.get_train_tracklets()
new_train = []
for img_paths, pid, camid, _, _, _ in train_data:
for img_path in img_paths:
new_train.append((img_path, pid, camid))
trainloader = DataLoader(
ImageDataset(new_train, transform=transform_train),
sampler=ClassIdentitySampler(new_train, args.train_batch, args.num_instances),
batch_size=args.train_batch, num_workers=args.workers,
pin_memory=pin_memory, drop_last=True,
)
queryloader = DataLoader(
ImageDataset(dataset.query, transform=transform_test),
batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
pin_memory=pin_memory, drop_last=False,
)
galleryloader = DataLoader(
ImageDataset(dataset.gallery, transform=transform_test),
batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
pin_memory=pin_memory, drop_last=False,
)
print("Initializing model: {}".format(args.arch))
model = models.init_model(name=args.arch, loss={'htri'})
print("Model size: {:.3f} M".format(count_num_param(model)))
criterion_htri = TripletLoss(margin=args.margin)
optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize, gamma=args.gamma)
if use_gpu:
model = nn.DataParallel(model).cuda()
start_time = time.time()
train_time = 0
best_epoch = args.start_epoch
print("==> Start training")
for epoch in range(args.start_epoch, args.max_epoch):
start_train_time = time.time()
# test before train
if epoch == 0:
mAP, rank1, rank5, rank10, rank20 = test(model, queryloader, galleryloader, use_gpu)
res_dict = {
'mAP': mAP,
'rank-1': rank1,
'rank-5': rank5,
'rank-10': rank10,
'rank-20': rank20,
}
writer.add_scalars('scalar/precision', res_dict, epoch)
train(epoch, model, criterion_htri, optimizer, trainloader, use_gpu, writer)
train_time += round(time.time() - start_train_time)
scheduler.step()
if (epoch + 1) > args.start_eval and args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 or (epoch + 1) == args.max_epoch:
print("==> Test")
mAP, rank1, rank5, rank10, rank20 = test(model, queryloader, galleryloader, use_gpu)
res_dict = {
'mAP': mAP,
'rank-1': rank1,
'rank-5': rank5,
'rank-10': rank10,
'rank-20': rank20,
}
writer.add_scalars('scalar/precision', res_dict, epoch+1)
is_best = rank1 > best_rank1
if is_best:
best_rank1 = rank1
best_epoch = epoch + 1
if use_gpu:
state_dict = model.module.state_dict()
else:
state_dict = model.state_dict()
if is_best:
save_checkpoint({
'state_dict': state_dict,
'rank1': rank1,
'epoch': epoch,
}, fpath=osp.join(save_dir, 's1_best_model' + '.pth.tar'))
train_data = dataset.get_train_tracklets()
new_train = []
for img_paths, pid, camid, _, _, _ in train_data:
for img_path in img_paths:
new_train.append((img_path, pid, camid))
trainloader = DataLoader(
ImageDataset(new_train, transform=transform_train),
sampler=ClassIdentitySampler(new_train, args.train_batch, args.num_instances),
batch_size=args.train_batch, num_workers=args.workers,
pin_memory=pin_memory, drop_last=True,
)
save_checkpoint({
'state_dict': state_dict,
'rank1': rank1,
'epoch': epoch,
}, False, osp.join(save_dir, 's1_checkpoint_final' + '.pth.tar'))
print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))
elapsed = round(time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
train_time = str(datetime.timedelta(seconds=train_time))
print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
def train(epoch, model, criterion_htri, optimizer, trainloader, use_gpu, writer):
losses = AverageMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
model.train()
end = time.time()
for batch_idx, (imgs, pids, _) in enumerate(trainloader):
data_time.update(time.time() - end)
if use_gpu:
imgs, pids = imgs.cuda(), pids.cuda()
features = model(imgs)
if isinstance(features, (tuple, list)):
loss = DeepSupervision(criterion_htri, features, pids)
else:
loss = criterion_htri(features, pids)
writer.add_scalar('scalar/loss', loss.item(), epoch*len(trainloader) + batch_idx + 1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
losses.update(loss.item(), pids.size(0))
if (batch_idx + 1) % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
epoch + 1, batch_idx + 1, len(trainloader), batch_time=batch_time,
data_time=data_time, loss=losses))
end = time.time()
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False):
batch_time = AverageMeter()
model.eval()
with torch.no_grad():
qf, q_pids, q_camids = [], [], []
for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
if use_gpu:
imgs = imgs.cuda()
end = time.time()
features = model(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
qf.append(features)
q_pids.extend(pids)
q_camids.extend(camids)
qf = torch.cat(qf, 0)
q_pids = np.asarray(q_pids)
q_camids = np.asarray(q_camids)
print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))
gf, g_pids, g_camids = [], [], []
for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
if use_gpu:
imgs = imgs.cuda()
end = time.time()
features = model(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
gf.append(features)
g_pids.extend(pids)
g_camids.extend(camids)
gf = torch.cat(gf, 0)
g_pids =
|
np.asarray(g_pids)
|
numpy.asarray
|
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""unit tests for numpy math operations"""
import pytest
import numpy as onp
import mindspore.numpy as mnp
from mindspore import context
from mindspore.common.dtype import dtype_to_nptype
from .utils import rand_int, rand_bool, run_binop_test, run_unary_test, run_multi_test, \
run_single_test, match_res, match_array, match_meta, match_all_arrays, to_tensor
context.set_context(mode=context.PYNATIVE_MODE)
class Cases():
def __init__(self):
self.arrs = [
rand_int(2),
rand_int(2, 3),
rand_int(2, 3, 4),
]
# scalars expanded across the 0th dimension
self.scalars = [
rand_int(),
rand_int(1),
rand_int(1, 1),
]
# arrays of the same size expanded across the 0th dimension
self.expanded_arrs = [
rand_int(2, 3),
rand_int(1, 2, 3),
rand_int(1, 1, 2, 3),
]
# arrays with last dimension aligned
self.aligned_arrs = [
rand_int(2, 3),
rand_int(1, 4, 3),
rand_int(5, 1, 2, 3),
rand_int(4, 2, 1, 1, 3),
]
# arrays which can be broadcast
self.broadcastables = [
rand_int(5),
rand_int(6, 1),
rand_int(7, 1, 5),
]
# boolean arrays which can be broadcast
self.bool_broadcastables = [
rand_bool(),
rand_bool(1),
rand_bool(5),
rand_bool(6, 1),
rand_bool(7, 1, 5),
rand_bool(8, 1, 6, 1),
]
# core dimension 0 is matched for each
# pair of array[i] and array[i + 1]
self.core_broadcastables = [
rand_int(3),
rand_int(3),
rand_int(6),
rand_int(6, 4),
rand_int(5, 2),
rand_int(2),
rand_int(2, 9),
rand_int(9, 8),
rand_int(6),
rand_int(2, 6, 5),
rand_int(9, 2, 7),
rand_int(7),
rand_int(5, 2, 4),
rand_int(6, 1, 4, 9),
rand_int(7, 1, 5, 3, 2),
rand_int(8, 1, 6, 1, 2, 9),
]
# arrays with dimensions of size 1
self.nested_arrs = [
rand_int(1),
rand_int(1, 2),
rand_int(3, 1, 8),
rand_int(1, 3, 9, 1),
]
test_case = Cases()
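# Hedged illustration of how core_broadcastables is consumed: entries 2*i and
# 2*i + 1 are generated with matching core dimensions, so tests such as test_dot
# and test_matmul below pair them up. The helper name is hypothetical and only
# included to make the pairing convention explicit.
def _paired_core_shapes(cases):
    arrs = cases.core_broadcastables
    return [(arrs[2 * i].shape, arrs[2 * i + 1].shape) for i in range(len(arrs) // 2)]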
def mnp_add(x1, x2):
return mnp.add(x1, x2)
def onp_add(x1, x2):
return onp.add(x1, x2)
def mnp_subtract(x1, x2):
return mnp.subtract(x1, x2)
def onp_subtract(x1, x2):
return onp.subtract(x1, x2)
def mnp_multiply(x1, x2):
return mnp.multiply(x1, x2)
def onp_multiply(x1, x2):
return onp.multiply(x1, x2)
def mnp_divide(x1, x2):
return mnp.divide(x1, x2)
def onp_divide(x1, x2):
return onp.divide(x1, x2)
def mnp_true_divide(x1, x2):
return mnp.true_divide(x1, x2)
def onp_true_divide(x1, x2):
return onp.true_divide(x1, x2)
def mnp_power(x1, x2):
return mnp.power(x1, x2)
def onp_power(x1, x2):
return onp.power(x1, x2)
def mnp_float_power(x1, x2):
return mnp.float_power(x1, x2)
def onp_float_power(x1, x2):
return onp.float_power(x1, x2)
def mnp_minimum(a, b):
return mnp.minimum(a, b)
def onp_minimum(a, b):
return onp.minimum(a, b)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add():
run_binop_test(mnp_add, onp_add, test_case)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_subtract():
run_binop_test(mnp_subtract, onp_subtract, test_case)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_multiply():
    run_binop_test(mnp_multiply, onp_multiply, test_case)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_divide():
run_binop_test(mnp_divide, onp_divide, test_case)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_true_divide():
run_binop_test(mnp_true_divide, onp_true_divide, test_case)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_power():
run_binop_test(mnp_power, onp_power, test_case, error=1e-5)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_float_power():
run_binop_test(mnp_float_power, onp_float_power, test_case, error=1e-5)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_minimum():
run_binop_test(mnp_minimum, onp_minimum, test_case)
x = onp.random.randint(-10, 10, 20).astype(onp.float32)
y = onp.random.randint(-10, 10, 20).astype(onp.float32)
x[onp.random.randint(0, 10, 3)] = onp.nan
y[onp.random.randint(0, 10, 3)] = onp.nan
x[onp.random.randint(0, 10, 3)] = onp.NINF
y[onp.random.randint(0, 10, 3)] = onp.NINF
x[onp.random.randint(0, 10, 3)] = onp.PINF
y[onp.random.randint(0, 10, 3)] = onp.PINF
match_res(mnp_minimum, onp_minimum, x, y)
match_res(mnp_minimum, onp_minimum, y, x)
def mnp_tensordot(x, y):
a = mnp.tensordot(x, y)
b = mnp.tensordot(x, y, axes=0)
c = mnp.tensordot(x, y, axes=1)
d = mnp.tensordot(x, y, axes=2)
e = mnp.tensordot(x, y, axes=(3, 0))
f = mnp.tensordot(x, y, axes=[2, 1])
g = mnp.tensordot(x, y, axes=((2, 3), (0, 1)))
h = mnp.tensordot(x, y, axes=[[3, 2], [1, 0]])
return a, b, c, d, e, f, g, h
def onp_tensordot(x, y):
a = onp.tensordot(x, y)
b = onp.tensordot(x, y, axes=0)
c = onp.tensordot(x, y, axes=1)
d = onp.tensordot(x, y, axes=2)
e = onp.tensordot(x, y, axes=(3, 0))
f = onp.tensordot(x, y, axes=[2, 1])
g = onp.tensordot(x, y, axes=((2, 3), (0, 1)))
h = onp.tensordot(x, y, axes=[[3, 2], [1, 0]])
return a, b, c, d, e, f, g, h
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tensordot():
x = rand_int(4, 2, 7, 7)
y = rand_int(7, 7, 6)
run_multi_test(mnp_tensordot, onp_tensordot, (x, y))
def mnp_std(x):
a = mnp.std(x)
b = mnp.std(x, axis=None)
c = mnp.std(x, axis=0)
d = mnp.std(x, axis=1)
e = mnp.std(x, axis=(-1, 1))
f = mnp.std(x, axis=(0, 1, 2))
g = mnp.std(x, axis=None, ddof=1, keepdims=True)
h = mnp.std(x, axis=0, ddof=1, keepdims=True)
i = mnp.std(x, axis=(2), ddof=1, keepdims=True)
return a, b, c, d, e, f, g, h, i
def onp_std(x):
a = onp.std(x)
b = onp.std(x, axis=None)
c = onp.std(x, axis=0)
d = onp.std(x, axis=1)
e = onp.std(x, axis=(-1, 1))
f = onp.std(x, axis=(0, 1, 2))
g = onp.std(x, axis=None, ddof=1, keepdims=True)
h = onp.std(x, axis=0, ddof=1, keepdims=True)
i = onp.std(x, axis=(2), ddof=1, keepdims=True)
return a, b, c, d, e, f, g, h, i
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_std():
arr1 = rand_int(2, 3, 4, 5)
arr2 = rand_int(4, 5, 4, 3, 3)
run_single_test(mnp_std, onp_std, arr1, error=1e-5)
run_single_test(mnp_std, onp_std, arr2, error=1e-5)
def mnp_nanstd(x):
a = mnp.nanstd(x)
b = mnp.nanstd(x, axis=None)
c = mnp.nanstd(x, axis=0)
d = mnp.nanstd(x, axis=1)
e = mnp.nanstd(x, axis=(-1, 1))
f = mnp.nanstd(x, axis=(0, 1, 2))
g = mnp.nanstd(x, axis=None, ddof=1, keepdims=True)
h = mnp.nanstd(x, axis=0, ddof=1, keepdims=True)
i = mnp.nanstd(x, axis=(2), ddof=1, keepdims=True)
return a, b, c, d, e, f, g, h, i
def onp_nanstd(x):
a = onp.nanstd(x)
b = onp.nanstd(x, axis=None)
c = onp.nanstd(x, axis=0)
d = onp.nanstd(x, axis=1)
e = onp.nanstd(x, axis=(-1, 1))
f = onp.nanstd(x, axis=(0, 1, 2))
g = onp.nanstd(x, axis=None, ddof=1, keepdims=True)
h = onp.nanstd(x, axis=0, ddof=1, keepdims=True)
i = onp.nanstd(x, axis=(2), ddof=1, keepdims=True)
return a, b, c, d, e, f, g, h, i
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_nanstd():
arr1 = rand_int(2, 3, 4, 5)
arr1[0][2][1][3] = onp.nan
arr1[1][0][2][4] = onp.nan
arr1[1][1][1][1] = onp.nan
arr2 = rand_int(4, 5, 4, 3, 3)
arr2[3][1][2][1][0] = onp.nan
arr2[1][1][1][1][1] = onp.nan
arr2[0][4][3][0][2] = onp.nan
run_single_test(mnp_nanstd, onp_nanstd, arr1, error=1e-5)
run_single_test(mnp_nanstd, onp_nanstd, arr2, error=1e-5)
match_res(mnp.nanstd, onp.nanstd, rand_int())
def mnp_var(x):
a = mnp.var(x)
b = mnp.var(x, axis=0)
c = mnp.var(x, axis=(0))
d = mnp.var(x, axis=(0, 1, 2))
e = mnp.var(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
return a, b, c, d, e
def onp_var(x):
a = onp.var(x)
b = onp.var(x, axis=0)
c = onp.var(x, axis=(0))
d = onp.var(x, axis=(0, 1, 2))
e = onp.var(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
return a, b, c, d, e
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_var():
arr1 = rand_int(2, 3, 4, 5)
arr2 = rand_int(4, 5, 4, 3, 3)
run_single_test(mnp_var, onp_var, arr1, error=1e-5)
run_single_test(mnp_var, onp_var, arr2, error=1e-5)
def mnp_nanvar(x):
a = mnp.var(x)
b = mnp.var(x, axis=0)
c = mnp.var(x, axis=(0))
d = mnp.var(x, axis=(0, 1, 2))
e = mnp.var(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
return a, b, c, d, e
def onp_nanvar(x):
a = onp.var(x)
b = onp.var(x, axis=0)
c = onp.var(x, axis=(0))
d = onp.var(x, axis=(0, 1, 2))
e = onp.var(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
return a, b, c, d, e
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_nanvar():
arr1 = rand_int(2, 3, 4, 5)
arr1[0][2][1][3] = onp.nan
arr1[1][0][2][4] = onp.nan
arr1[1][1][1][1] = onp.nan
arr2 = rand_int(4, 5, 4, 3, 3)
arr2[3][1][2][1][0] = onp.nan
arr2[1][1][1][1][1] = onp.nan
arr2[0][4][3][0][2] = onp.nan
run_single_test(mnp_nanvar, onp_nanvar, arr1, error=1e-5)
run_single_test(mnp_nanvar, onp_nanvar, arr2, error=1e-5)
match_res(mnp.nanvar, onp.nanvar, rand_int())
def mnp_average(x):
a = mnp.average(x)
b = mnp.average(x, axis=None)
c = mnp.average(x, axis=0)
d = mnp.average(x, axis=1)
e = mnp.average(x, axis=(-2, 1))
f = mnp.average(x, axis=(0, 1, 2, 3))
g = mnp.average(x, axis=None, weights=x)
h = mnp.average(x, axis=0, weights=x)
i = mnp.average(x, axis=(1, 2, 3), weights=x)
return a, b, c, d, e, f, g, h, i
def onp_average(x):
a = onp.average(x)
b = onp.average(x, axis=None)
c = onp.average(x, axis=0)
d = onp.average(x, axis=1)
e = onp.average(x, axis=(-2, 1))
f = onp.average(x, axis=(0, 1, 2, 3))
g = onp.average(x, axis=None, weights=x)
h = onp.average(x, axis=0, weights=x)
i = onp.average(x, axis=(1, 2, 3), weights=x)
return a, b, c, d, e, f, g, h, i
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_average():
arr1 = rand_int(2, 3, 4, 5)
arr2 = rand_int(4, 5, 1, 3, 1)
run_single_test(mnp_average, onp_average, arr1, error=1e-5)
run_single_test(mnp_average, onp_average, arr2, error=1e-5)
def mnp_count_nonzero(x):
a = mnp.count_nonzero(x)
b = mnp.count_nonzero(x, axis=None)
c = mnp.count_nonzero(x, axis=0)
d = mnp.count_nonzero(x, axis=1)
e = mnp.count_nonzero(x, axis=(-2, 1))
f = mnp.count_nonzero(x, axis=(0, 1, 2, 3))
return a, b, c, d, e, f
def onp_count_nonzero(x):
a = onp.count_nonzero(x)
b = onp.count_nonzero(x, axis=None)
c = onp.count_nonzero(x, axis=0)
d = onp.count_nonzero(x, axis=1)
e = onp.count_nonzero(x, axis=(-2, 1))
f = onp.count_nonzero(x, axis=(0, 1, 2, 3))
return a, b, c, d, e, f
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_count_nonzero():
# minus 5 to make some values below zero
arr1 = rand_int(2, 3, 4, 5) - 5
arr2 = rand_int(4, 5, 4, 3, 3) - 5
run_single_test(mnp_count_nonzero, onp_count_nonzero, arr1)
run_single_test(mnp_count_nonzero, onp_count_nonzero, arr2)
def mnp_inner(a, b):
return mnp.inner(a, b)
def onp_inner(a, b):
return onp.inner(a, b)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_inner():
for arr1 in test_case.aligned_arrs:
for arr2 in test_case.aligned_arrs:
match_res(mnp_inner, onp_inner, arr1, arr2)
for scalar1 in test_case.scalars:
for scalar2 in test_case.scalars:
match_res(mnp_inner, onp_inner,
scalar1, scalar2)
def mnp_dot(a, b):
return mnp.dot(a, b)
def onp_dot(a, b):
return onp.dot(a, b)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dot():
# test case (1D, 1D)
match_res(mnp_dot, onp_dot, rand_int(3), rand_int(3))
# test case (2D, 2D)
match_res(mnp_dot, onp_dot, rand_int(4, 7), rand_int(7, 2))
# test case (0D, _) (_, 0D)
match_res(mnp_dot, onp_dot, rand_int(), rand_int(1, 9, 3))
match_res(mnp_dot, onp_dot, rand_int(8, 5, 6, 3), rand_int())
# test case (ND, 1D)
match_res(mnp_dot, onp_dot, rand_int(2, 4, 5), rand_int(5))
# test case (ND, MD)
match_res(mnp_dot, onp_dot, rand_int(5, 4, 1, 8), rand_int(8, 3))
for i in range(8):
match_res(mnp_dot, onp_dot,
test_case.core_broadcastables[2*i], test_case.core_broadcastables[2*i + 1])
def mnp_outer(a, b):
return mnp.outer(a, b)
def onp_outer(a, b):
return onp.outer(a, b)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_outer():
run_binop_test(mnp_outer, onp_outer, test_case)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_type_promotion():
arr = rand_int(2, 3)
onp_res = onp_add(arr, arr)
a = to_tensor(arr, dtype=mnp.float16)
b = to_tensor(arr, dtype=mnp.float32)
c = to_tensor(arr, dtype=mnp.int32)
match_array(mnp_add(a, b).asnumpy(), onp_res)
match_array(mnp_add(b, c).asnumpy(), onp_res)
def mnp_absolute(x):
return mnp.absolute(x)
def onp_absolute(x):
return onp.absolute(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_absolute():
arr = rand_int(2, 3)
a = to_tensor(arr, dtype=mnp.float16)
b = to_tensor(arr, dtype=mnp.float32)
c = to_tensor(arr, dtype=mnp.uint8)
d = to_tensor(arr, dtype=mnp.bool_)
match_array(mnp_absolute(a).asnumpy(), onp_absolute(a.asnumpy()))
match_array(mnp_absolute(b).asnumpy(), onp_absolute(b.asnumpy()))
match_array(mnp_absolute(c).asnumpy(), onp_absolute(c.asnumpy()))
match_array(mnp_absolute(d).asnumpy(), onp_absolute(d.asnumpy()))
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_deg2rad_rad2deg():
arrs = [rand_int(2, 3), rand_int(1, 2, 4), rand_int(2, 4)]
for arr in arrs:
match_res(mnp.deg2rad, onp.deg2rad, arr)
match_res(mnp.rad2deg, onp.rad2deg, arr)
def mnp_ptp(x):
a = mnp.ptp(x)
b = mnp.ptp(x, keepdims=True)
c = mnp.ptp(x, axis=(0, 1))
d = mnp.ptp(x, axis=-1)
return a, b, c, d
def onp_ptp(x):
a = onp.ptp(x)
b = onp.ptp(x, keepdims=True)
c = onp.ptp(x, axis=(0, 1))
d = onp.ptp(x, axis=-1)
return a, b, c, d
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_ptp():
arrs = [rand_int(2, 3), rand_int(1, 2, 4), rand_int(2, 4)]
for arr in arrs:
match_res(mnp_ptp, onp_ptp, arr)
def mnp_add_dtype(x1, x2):
return mnp.add(x1, x2, dtype=mnp.float32)
def onp_add_dtype(x1, x2):
return onp.add(x1, x2, dtype=onp.float32)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add_dtype():
x1 = rand_int(2, 3).astype('int32')
x2 = rand_int(2, 3).astype('int32')
arrs = (x1, x2)
mnp_arrs = map(to_tensor, arrs)
mnp_res = mnp_add_dtype(*mnp_arrs)
onp_res = onp_add_dtype(*arrs)
for actual, expected in zip(mnp_res, onp_res):
assert actual.asnumpy().dtype == expected.dtype
def mnp_matmul(x1, x2):
return mnp.matmul(x1, x2)
def onp_matmul(x1, x2):
return onp.matmul(x1, x2)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_matmul():
for scalar1 in test_case.scalars[1:]:
for scalar2 in test_case.scalars[1:]:
match_res(mnp_matmul, onp_matmul,
scalar1, scalar2)
for i in range(8):
match_res(mnp_matmul, onp_matmul,
test_case.core_broadcastables[2*i],
test_case.core_broadcastables[2*i + 1])
def mnp_square(x):
return mnp.square(x)
def onp_square(x):
return onp.square(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_square():
run_unary_test(mnp_square, onp_square, test_case)
def mnp_sqrt(x):
return mnp.sqrt(x)
def onp_sqrt(x):
return onp.sqrt(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_sqrt():
run_unary_test(mnp_sqrt, onp_sqrt, test_case)
def mnp_reciprocal(x):
return mnp.reciprocal(x)
def onp_reciprocal(x):
return onp.reciprocal(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_reciprocal():
run_unary_test(mnp_reciprocal, onp_reciprocal, test_case)
def mnp_log(x):
return mnp.log(x)
def onp_log(x):
return onp.log(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_log():
run_unary_test(mnp.log, onp.log, test_case, error=1e-5)
def mnp_log1p(x):
return mnp.log1p(x)
def onp_log1p(x):
return onp.log1p(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_log1p():
run_unary_test(mnp_log1p, onp_log1p, test_case, error=1e-5)
def mnp_logaddexp(x1, x2):
return mnp.logaddexp(x1, x2)
def onp_logaddexp(x1, x2):
return onp.logaddexp(x1, x2)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_logaddexp():
test_cases = [
onp.random.randint(1, 5, (2)).astype('float16'),
onp.random.randint(1, 5, (3, 2)).astype('float16'),
onp.random.randint(1, 5, (1, 3, 2)).astype('float16'),
onp.random.randint(1, 5, (5, 6, 3, 2)).astype('float16')]
for _, x1 in enumerate(test_cases):
for _, x2 in enumerate(test_cases):
expected = onp_logaddexp(x1, x2)
actual = mnp_logaddexp(to_tensor(x1), to_tensor(x2))
onp.testing.assert_almost_equal(actual.asnumpy().tolist(), expected.tolist(),
decimal=2)
def mnp_log2(x):
return mnp.log2(x)
def onp_log2(x):
return onp.log2(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_log2():
run_unary_test(mnp_log2, onp_log2, test_case, error=1e-5)
def mnp_logaddexp2(x1, x2):
return mnp.logaddexp2(x1, x2)
def onp_logaddexp2(x1, x2):
return onp.logaddexp2(x1, x2)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_logaddexp2():
test_cases = [
onp.random.randint(1, 5, (2)).astype('float16'),
onp.random.randint(1, 5, (3, 2)).astype('float16'),
onp.random.randint(1, 5, (1, 3, 2)).astype('float16'),
onp.random.randint(1, 5, (5, 6, 3, 2)).astype('float16')]
for _, x1 in enumerate(test_cases):
for _, x2 in enumerate(test_cases):
expected = onp_logaddexp2(x1, x2)
actual = mnp_logaddexp2(to_tensor(x1), to_tensor(x2))
onp.testing.assert_almost_equal(actual.asnumpy().tolist(), expected.tolist(),
decimal=2)
def mnp_log10(x):
return mnp.log10(x)
def onp_log10(x):
return onp.log10(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_log10():
run_unary_test(mnp_log10, onp_log10, test_case, error=1e-5)
def mnp_maximum(x1, x2):
return mnp.maximum(x1, x2)
def onp_maximum(x1, x2):
return onp.maximum(x1, x2)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maximum():
run_binop_test(mnp_maximum, onp_maximum, test_case)
x = onp.random.randint(-10, 10, 20).astype(onp.float32)
y = onp.random.randint(-10, 10, 20).astype(onp.float32)
x[onp.random.randint(0, 10, 3)] = onp.nan
y[onp.random.randint(0, 10, 3)] = onp.nan
x[onp.random.randint(0, 10, 3)] = onp.NINF
y[onp.random.randint(0, 10, 3)] = onp.NINF
x[onp.random.randint(0, 10, 3)] = onp.PINF
y[onp.random.randint(0, 10, 3)] = onp.PINF
match_res(mnp_maximum, onp_maximum, x, y)
match_res(mnp_maximum, onp_maximum, y, x)
def mnp_clip(x):
a = mnp.clip(x, to_tensor(10.0), to_tensor([2,]))
b = mnp.clip(x, 0, 1)
c = mnp.clip(x, to_tensor(0), to_tensor(10), dtype=mnp.float32)
d = x.clip(to_tensor(10.0), to_tensor([2,]))
e = x.clip(0, 1)
f = x.clip(to_tensor(0), to_tensor(10), dtype=mnp.float32)
return a, b, c, d, e, f
def onp_clip(x):
a = onp.clip(x, onp.asarray(10.0), onp.asarray([2,]))
b = onp.clip(x, 0, 1)
c = onp.clip(x, onp.asarray(0), onp.asarray(10), dtype=onp.float32)
d = x.clip(onp.asarray(10.0), onp.asarray([2,]))
e = x.clip(0, 1)
f = x.clip(onp.asarray(0), onp.asarray(10), dtype=onp.float32)
return a, b, c, d, e, f
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_clip():
run_unary_test(mnp_clip, onp_clip, test_case)
def mnp_amax(x, mask):
a = mnp.amax(x)
b = mnp.amax(x, axis=-3)
c = mnp.amax(x, keepdims=True)
d = mnp.amax(x, initial=3)
e = mnp.amax(x, axis=(0, 1), keepdims=True)
f = mnp.amax(x, initial=4, where=mask)
g = mnp.amax(x, initial=5, where=mask, keepdims=True)
h = mnp.amax(x, axis=(1, 2, 3), initial=6, where=mask)
return a, b, c, d, e, f, g, h
def onp_amax(x, mask):
a = onp.amax(x)
b = onp.amax(x, axis=-3)
c = onp.amax(x, keepdims=True)
d = onp.amax(x, initial=3)
e = onp.amax(x, axis=(0, 1), keepdims=True)
f = onp.amax(x, initial=4, where=mask)
g = onp.amax(x, initial=5, where=mask, keepdims=True)
h = onp.amax(x, axis=(1, 2, 3), initial=6, where=mask)
return a, b, c, d, e, f, g, h
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_amax():
a = rand_int(2, 3, 4, 5).astype('float32')
mask = rand_bool(2, 3, 4, 5)
run_multi_test(mnp_amax, onp_amax, (a, mask))
match_res(mnp.amax, onp.amax, rand_int())
def mnp_amin(x, mask):
a = mnp.amin(x)
b = mnp.amin(x, axis=-3)
c = mnp.amin(x, keepdims=True)
d = mnp.amin(x, initial=-1)
e = mnp.amin(x, axis=(0, 1), keepdims=True)
f = mnp.amin(x, initial=-2)
g = mnp.amin(x, initial=-3, keepdims=True)
h = mnp.amin(x, axis=(1, 2, 3), initial=-4, where=mask)
return a, b, c, d, e, f, g, h
def onp_amin(x, mask):
a = onp.amin(x)
b = onp.amin(x, axis=-3)
c = onp.amin(x, keepdims=True)
d = onp.amin(x, initial=-1)
e = onp.amin(x, axis=(0, 1), keepdims=True)
f = onp.amin(x, initial=-2)
g = onp.amin(x, initial=-3, keepdims=True)
h = onp.amin(x, axis=(1, 2, 3), initial=-4, where=mask)
return a, b, c, d, e, f, g, h
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_amin():
a = rand_int(2, 3, 4, 5).astype('float32')
mask = rand_bool(2, 3, 4, 5)
run_multi_test(mnp_amin, onp_amin, (a, mask))
match_res(mnp.amin, onp.amin, rand_int())
def mnp_hypot(x1, x2):
return mnp.hypot(x1, x2)
def onp_hypot(x1, x2):
return onp.hypot(x1, x2)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_hypot():
run_binop_test(mnp_hypot, onp_hypot, test_case)
def mnp_heaviside(x1, x2):
return mnp.heaviside(x1, x2)
def onp_heaviside(x1, x2):
return onp.heaviside(x1, x2)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_heaviside():
broadcastables = test_case.broadcastables
for b1 in broadcastables:
for b2 in broadcastables:
b = onp.subtract(b1, b2)
match_res(mnp_heaviside, onp_heaviside, b, b1)
match_res(mnp_heaviside, onp_heaviside, b, b2)
def mnp_floor(x):
return mnp.floor(x)
def onp_floor(x):
return onp.floor(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_floor():
run_unary_test(mnp_floor, onp_floor, test_case)
x = rand_int(2, 3) * onp.random.rand(2, 3)
match_res(mnp_floor, onp_floor, x)
match_res(mnp_floor, onp_floor, -x)
def mnp_floor_divide(x, y):
return mnp.floor_divide(x, y)
def onp_floor_divide(x, y):
return onp.floor_divide(x, y)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_floor_divide():
    run_binop_test(mnp_floor_divide, onp_floor_divide, test_case)
def mnp_remainder(x, y):
return mnp.remainder(x, y)
def onp_remainder(x, y):
return onp.remainder(x, y)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_remainder():
x = rand_int(2, 3)
y = rand_int(2, 3)
match_res(mnp_remainder, onp_remainder, x, y)
def mnp_mod(x, y):
return mnp.mod(x, y)
def onp_mod(x, y):
return onp.mod(x, y)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_mod():
x = rand_int(2, 3)
y = rand_int(2, 3)
match_res(mnp_mod, onp_mod, x, y)
def mnp_fmod(x, y):
return mnp.fmod(x, y)
def onp_fmod(x, y):
return onp.fmod(x, y)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_fmod():
x = rand_int(2, 3)
y = rand_int(2, 3)
match_res(mnp_fmod, onp_fmod, x, y)
def mnp_fix(x):
return mnp.fix(x)
def onp_fix(x):
return onp.fix(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_fix():
x = rand_int(2, 3)
y = rand_int(2, 3)
floats = onp.divide(onp.subtract(x, y), y)
match_res(mnp_fix, onp_fix, floats, error=1e-5)
def mnp_trunc(x):
return mnp.trunc(x)
def onp_trunc(x):
return onp.trunc(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_trunc():
x = rand_int(2, 3)
y = rand_int(2, 3)
floats = onp.divide(onp.subtract(x, y), y)
match_res(mnp_trunc, onp_trunc, floats, error=1e-5)
def mnp_exp(x):
return mnp.exp(x)
def onp_exp(x):
return onp.exp(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exp():
run_unary_test(mnp_exp, onp_exp, test_case, error=5)
def mnp_expm1(x):
return mnp.expm1(x)
def onp_expm1(x):
return onp.expm1(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_expm1():
run_unary_test(mnp_expm1, onp_expm1, test_case, error=5)
def mnp_exp2(x):
return mnp.exp2(x)
def onp_exp2(x):
return onp.exp2(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exp2():
run_unary_test(mnp_exp2, onp_exp2, test_case, error=5)
def mnp_kron(x, y):
return mnp.kron(x, y)
def onp_kron(x, y):
return onp.kron(x, y)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_kron():
run_binop_test(mnp_kron, onp_kron, test_case)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cross():
x = onp.arange(8).reshape(2, 2, 1, 2)
y = onp.arange(4).reshape(1, 2, 2)
match_res(mnp.cross, onp.cross, x, y)
match_res(mnp.cross, onp.cross, x, y, axisa=-3, axisb=1, axisc=2)
match_res(mnp.cross, onp.cross, x, y, axisa=-3, axisb=1, axisc=2, axis=1)
x = onp.arange(18).reshape(2, 3, 1, 3)
y = onp.arange(9).reshape(1, 3, 3)
match_res(mnp.cross, onp.cross, x, y)
match_res(mnp.cross, onp.cross, x, y, axisa=-3, axisb=1, axisc=2)
match_res(mnp.cross, onp.cross, x, y, axisa=-3, axisb=1, axisc=2, axis=1)
def mnp_ceil(x):
return mnp.ceil(x)
def onp_ceil(x):
return onp.ceil(x)
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_ceil():
run_unary_test(mnp_ceil, onp_ceil, test_case)
def mnp_positive(x):
return mnp.positive(x)
def onp_positive(x):
return onp.positive(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_positive():
arr = onp.arange(-6, 6).reshape((2, 2, 3)).astype('float32')
onp_pos = onp_positive(arr)
mnp_pos = mnp_positive(to_tensor(arr))
match_array(mnp_pos.asnumpy(), onp_pos)
def mnp_negative(x):
return mnp.negative(x)
def onp_negative(x):
return onp.negative(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_negative():
arr = onp.arange(-6, 6).reshape((2, 2, 3)).astype('float32')
onp_neg = onp_negative(arr)
mnp_neg = mnp_negative(to_tensor(arr))
match_array(mnp_neg.asnumpy(), onp_neg, 1e-5)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cumsum():
x = mnp.ones((16, 16), dtype="bool")
match_array(mnp.cumsum(x).asnumpy(), onp.cumsum(x.asnumpy()))
match_array(mnp.cumsum(x, axis=0).asnumpy(),
onp.cumsum(x.asnumpy(), axis=0))
match_meta(mnp.cumsum(x).asnumpy(), onp.cumsum(x.asnumpy()))
x = rand_int(3, 4, 5)
match_array(mnp.cumsum(to_tensor(x), dtype="bool").asnumpy(),
onp.cumsum(x, dtype="bool"))
match_array(mnp.cumsum(to_tensor(x), axis=-1).asnumpy(),
onp.cumsum(x, axis=-1))
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_promote_types():
assert mnp.promote_types(mnp.int32, mnp.bool_) == mnp.int32
assert mnp.promote_types(int, mnp.bool_) == mnp.int32
assert mnp.promote_types("float32", mnp.int64) == mnp.float32
assert mnp.promote_types(mnp.int64, mnp.float16) == mnp.float16
assert mnp.promote_types(int, float) == mnp.float32
def mnp_diff(input_tensor):
a = mnp.diff(input_tensor, 2, append=3.0)
b = mnp.diff(input_tensor, 4, prepend=6, axis=-2)
c = mnp.diff(input_tensor, 0, append=3.0, axis=-1)
d = mnp.diff(input_tensor, 1, prepend=input_tensor)
e = mnp.ediff1d(input_tensor, to_end=input_tensor)
f = mnp.ediff1d(input_tensor)
g = mnp.ediff1d(input_tensor, to_begin=3)
return a, b, c, d, e, f, g
def onp_diff(input_array):
a = onp.diff(input_array, 2, append=3.0)
b = onp.diff(input_array, 4, prepend=6, axis=-2)
c = onp.diff(input_array, 0, append=3.0, axis=-1)
d = onp.diff(input_array, 1, prepend=input_array)
e = onp.ediff1d(input_array, to_end=input_array)
f = onp.ediff1d(input_array)
g = onp.ediff1d(input_array, to_begin=3)
return a, b, c, d, e, f, g
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_diff():
arr = rand_int(3, 4, 5)
match_res(mnp_diff, onp_diff, arr)
arr = rand_int(1, 4, 6, 3)
match_res(mnp_diff, onp_diff, arr)
def mnp_sin(x):
return mnp.sin(x)
def onp_sin(x):
return onp.sin(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_sin():
arr = onp.random.rand(2, 3, 4).astype('float32')
expect = onp_sin(arr)
actual = mnp_sin(to_tensor(arr))
match_array(actual.asnumpy(), expect, error=5)
def mnp_cos(x):
return mnp.cos(x)
def onp_cos(x):
return onp.cos(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cos():
arr = onp.random.rand(2, 3, 4).astype('float32')
expect = onp_cos(arr)
actual = mnp_cos(to_tensor(arr))
match_array(actual.asnumpy(), expect, error=5)
def mnp_tan(x):
return mnp.tan(x)
def onp_tan(x):
return onp.tan(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tan():
arr = onp.array([-0.75, -0.5, 0, 0.5, 0.75]).astype('float32')
expect = onp_tan(arr)
actual = mnp_tan(to_tensor(arr))
match_array(actual.asnumpy(), expect, error=5)
def mnp_arcsin(x):
return mnp.arcsin(x)
def onp_arcsin(x):
return onp.arcsin(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arcsin():
arr = onp.random.uniform(-1, 1, 12).astype('float32')
onp_asin = onp_arcsin(arr)
mnp_asin = mnp_arcsin(to_tensor(arr))
match_array(mnp_asin.asnumpy(), onp_asin, error=3)
def mnp_arccos(x):
return mnp.arccos(x)
def onp_arccos(x):
return onp.arccos(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arccos():
arr = onp.random.uniform(-1, 1, 12).astype('float32')
onp_acos = onp_arccos(arr)
mnp_acos = mnp_arccos(to_tensor(arr))
match_array(mnp_acos.asnumpy(), onp_acos, error=2)
def mnp_arctan(x):
return mnp.arctan(x)
def onp_arctan(x):
return onp.arctan(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arctan():
arr = onp.random.uniform(-1, 1, 12).astype('float32')
onp_atan = onp_arctan(arr)
mnp_atan = mnp_arctan(to_tensor(arr))
match_array(mnp_atan.asnumpy(), onp_atan, error=5)
def mnp_sinh(x):
return mnp.sinh(x)
def onp_sinh(x):
return onp.sinh(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_sinh():
arr = onp.random.rand(2, 3, 4).astype('float32')
expect = onp_sinh(arr)
actual = mnp_sinh(to_tensor(arr))
match_array(actual.asnumpy(), expect, error=5)
def mnp_cosh(x):
return mnp.cosh(x)
def onp_cosh(x):
return onp.cosh(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cosh():
arr = onp.random.rand(2, 3, 4).astype('float32')
expect = onp_cosh(arr)
actual = mnp_cosh(to_tensor(arr))
match_array(actual.asnumpy(), expect, error=5)
def mnp_tanh(x):
return mnp.tanh(x)
def onp_tanh(x):
return onp.tanh(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tanh():
arr = onp.random.rand(2, 3, 4).astype('float32')
expect = onp_tanh(arr)
actual = mnp_tanh(to_tensor(arr))
match_array(actual.asnumpy(), expect, error=5)
def mnp_arcsinh(x):
return mnp.arcsinh(x)
def onp_arcsinh(x):
return onp.arcsinh(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arcsinh():
arr = onp.random.rand(2, 3, 4).astype('float32')
expect = onp_arcsinh(arr)
actual = mnp_arcsinh(to_tensor(arr))
match_array(actual.asnumpy(), expect, error=5)
def mnp_arccosh(x):
return mnp.arccosh(x)
def onp_arccosh(x):
return onp.arccosh(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arccosh():
arr = onp.random.randint(1, 100, size=(2, 3)).astype('float32')
expect = onp_arccosh(arr)
actual = mnp_arccosh(to_tensor(arr))
match_array(actual.asnumpy(), expect, error=5)
def mnp_arctanh(x):
return mnp.arctanh(x)
def onp_arctanh(x):
return onp.arctanh(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arctanh():
arr = onp.random.uniform(-0.9, 1, 10).astype('float32')
expect = onp_arctanh(arr)
actual = mnp_arctanh(to_tensor(arr))
match_array(actual.asnumpy(), expect, error=5)
def mnp_arctan2(x, y):
return mnp.arctan2(x, y)
def onp_arctan2(x, y):
return onp.arctan2(x, y)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arctan2():
run_binop_test(mnp_arctan2, onp_arctan2, test_case, error=5)
def mnp_convolve(mode):
a = mnp.convolve([1, 2, 3, 4, 5], 2, mode=mode)
b = mnp.convolve([1, 2, 3, 4, 5], [2, 3], mode=mode)
c = mnp.convolve([1, 2], [2, 5, 10], mode=mode)
d = mnp.convolve(mnp.array([1, 2, 3, 4, 5]), mnp.array([1, 2, 3, 4, 5]), mode=mode)
e = mnp.convolve([1, 2, 3, 4, 5], 2, mode=mode)
return a, b, c, d, e
def onp_convolve(mode):
a = onp.convolve([1, 2, 3, 4, 5], 2, mode=mode)
b = onp.convolve([1, 2, 3, 4, 5], [2, 3], mode=mode)
c = onp.convolve([1, 2], [2, 5, 10], mode=mode)
d = onp.convolve(onp.array([1, 2, 3, 4, 5]), onp.array([1, 2, 3, 4, 5]), mode=mode)
e = onp.convolve([1, 2, 3, 4, 5], 2, mode=mode)
return a, b, c, d, e
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_convolve():
for mode in ['full', 'same', 'valid']:
mnp_res = mnp_convolve(mode)
onp_res = onp_convolve(mode)
match_all_arrays(mnp_res, onp_res)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cov():
x = onp.random.random((3, 4)).tolist()
mnp_res = mnp.cov(x)
onp_res = onp.cov(x)
match_all_arrays(mnp_res, onp_res, error=1e-5)
mnp_res = mnp.cov(x[0])
onp_res = onp.cov(x[0])
match_all_arrays(mnp_res, onp_res, error=1e-5)
w1 = [0, 1, 2, 3]
w2 = [4, 5, 6, 7]
mnp_res = mnp.cov(x, fweights=w1)
onp_res = onp.cov(x, fweights=w1)
match_all_arrays(mnp_res, onp_res, error=1e-5)
mnp_res = mnp.cov(x, aweights=w2)
onp_res = onp.cov(x, aweights=w2)
match_all_arrays(mnp_res, onp_res, error=1e-5)
mnp_res = mnp.cov(x, fweights=w1, aweights=w2)
onp_res = onp.cov(x, fweights=w1, aweights=w2)
match_all_arrays(mnp_res, onp_res, error=1e-5)
mnp_res = mnp.cov(x, fweights=w1, aweights=w2, ddof=3)
onp_res = onp.cov(x, fweights=w1, aweights=w2, ddof=3)
match_all_arrays(mnp_res, onp_res, error=1e-5)
mnp_res = mnp.cov(x, fweights=w1, aweights=w2, bias=True)
onp_res = onp.cov(x, fweights=w1, aweights=w2, bias=True)
match_all_arrays(mnp_res, onp_res, error=1e-5)
mnp_res = mnp.cov(x, fweights=w1[0:3], aweights=w2[0:3], rowvar=False, bias=True)
onp_res = onp.cov(x, fweights=w1[0:3], aweights=w2[0:3], rowvar=False, bias=True)
match_all_arrays(mnp_res, onp_res, error=1e-5)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_trapz():
y = rand_int(2, 3, 4, 5)
match_res(mnp.trapz, onp.trapz, y)
match_res(mnp.trapz, onp.trapz, y, x=[-5, -3, 0, 7, 10])
match_res(mnp.trapz, onp.trapz, y, dx=2, axis=3)
match_res(mnp.trapz, onp.trapz, y, x=[1, 5, 6, 9], dx=3, axis=-2)
def mnp_gcd(x, y):
return mnp.gcd(x, y)
def onp_gcd(x, y):
return onp.gcd(x, y)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_gcd():
x = onp.arange(-12, 12).reshape(2, 3, 4)
y = onp.arange(24).reshape(2, 3, 4)
match_res(mnp_gcd, onp_gcd, x, y)
def mnp_lcm(x, y):
return mnp.lcm(x, y)
def onp_lcm(x, y):
return onp.lcm(x, y)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_lcm():
x = onp.arange(-12, 12).reshape(2, 3, 4)
y = onp.arange(24).reshape(2, 3, 4)
match_res(mnp_lcm, onp_lcm, x, y)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_inner():
with pytest.raises(ValueError):
mnp.inner(to_tensor(test_case.arrs[0]),
to_tensor(test_case.arrs[1]))
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_add():
with pytest.raises(ValueError):
mnp.add(to_tensor(test_case.arrs[1]), to_tensor(test_case.arrs[2]))
def mnp_nanmax(x):
a = mnp.nanmax(x)
b = mnp.nanmax(x, keepdims=True)
c = mnp.nanmax(x, axis=-2)
d = mnp.nanmax(x, axis=0, keepdims=True)
e = mnp.nanmax(x, axis=(-2, 3))
f = mnp.nanmax(x, axis=(-3, -1), keepdims=True)
return a, b, c, d, e, f
def onp_nanmax(x):
a = onp.nanmax(x)
b = onp.nanmax(x, keepdims=True)
c = onp.nanmax(x, axis=-2)
d = onp.nanmax(x, axis=0, keepdims=True)
e = onp.nanmax(x, axis=(-2, 3))
f = onp.nanmax(x, axis=(-3, -1), keepdims=True)
return a, b, c, d, e, f
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_nanmax():
x = rand_int(2, 3, 4, 5)
x[0][2][1][3] = onp.nan
x[1][0][2][4] = onp.nan
x[1][1][1][1] = onp.nan
run_multi_test(mnp_nanmax, onp_nanmax, (x,))
def mnp_nanmin(x):
a = mnp.nanmin(x)
b = mnp.nanmin(x, keepdims=True)
c = mnp.nanmin(x, axis=-2)
d = mnp.nanmin(x, axis=0, keepdims=True)
e = mnp.nanmin(x, axis=(-2, 3))
f = mnp.nanmin(x, axis=(-3, -1), keepdims=True)
return a, b, c, d, e, f
def onp_nanmin(x):
a = onp.nanmin(x)
b = onp.nanmin(x, keepdims=True)
c = onp.nanmin(x, axis=-2)
d = onp.nanmin(x, axis=0, keepdims=True)
e = onp.nanmin(x, axis=(-2, 3))
f = onp.nanmin(x, axis=(-3, -1), keepdims=True)
return a, b, c, d, e, f
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_nanmin():
x = rand_int(2, 3, 4, 5)
x[0][2][1][3] = onp.nan
x[1][0][2][4] = onp.nan
x[1][1][1][1] = onp.nan
run_multi_test(mnp_nanmin, onp_nanmin, (x,))
def mnp_nansum(x):
a = mnp.nansum(x)
b = mnp.nansum(x, keepdims=True)
c = mnp.nansum(x, axis=-2)
d = mnp.nansum(x, axis=0, keepdims=True)
e = mnp.nansum(x, axis=(-2, 3))
f = mnp.nansum(x, axis=(-3, -1), keepdims=True)
return a, b, c, d, e, f
def onp_nansum(x):
a = onp.nansum(x)
b = onp.nansum(x, keepdims=True)
c = onp.nansum(x, axis=-2)
d = onp.nansum(x, axis=0, keepdims=True)
e =
|
onp.nansum(x, axis=(-2, 3))
|
numpy.nansum
|
import scipy
import csv
import json
import os
import math
import numpy as np
from scipy.stats import norm
from scipy import signal
import scipy.ndimage as ndimage
from at_synapse_detection import synaptogram
from at_synapse_detection import dataAccess as da
from at_synapse_detection import SynapseDetection as syn
def checkQueryAgainstAnno(anno, query, threshlist, win_xy, win_z, filepath):
"""
Given a manual annotation, see if a synapse can be detected.
    Input is a manual annotation and a query; the output is an array that
    indicates at which thresholds the annotation is detected or not detected.
Parameters:
-------------------
anno : dict - manual synapse annotation
query : dict
threshlist : list - list of thresholds
win_xy : int - window to search in (full window is 2x)
win_z : int - window, z dimension
filepath : str - data location
Returns:
--------------------
    synapseDetected : list - same size as threshlist, T/F if detected
"""
synapseDetected = np.zeros(len(threshlist), dtype=bool)
# Get range of coordinates
bbox = synaptogram.getAnnotationBoundingBox2(anno)
bbox = synaptogram.transformSynapseCoordinates(bbox)
expandedBox = synaptogram.expandBoundingBox(bbox, win_xy, win_z)
zrange = list(range(expandedBox['startZ'], expandedBox['endZ']+1))
synapticVolumes = createSynapseVolumesCutout(query, anno, win_xy, win_z, filepath)
resultVol = getSynapseDetectionSSA(synapticVolumes, query)
#print('Print bbox:', bbox)
#print('Print Expanded Box: ', expandedBox)
synapsevol = annotationToBinaryVolume(resultVol.shape, anno, expandedBox, bbox, zrange)
SE = np.ones((2, 2, 2))
for n, thresh in enumerate(threshlist):
dilated_volume = ndimage.binary_dilation(resultVol > thresh, SE)
overlaidvolumes = dilated_volume + synapsevol
if overlaidvolumes.max() > 1:
synapseDetected[n] = 1
else:
synapseDetected[n] = 0
return synapseDetected
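# Hedged sketch (not part of the original pipeline, all values invented): the
# loop above flags an annotation as detected when the dilated, thresholded
# probability map overlaps the annotation mask. On tiny synthetic volumes:
def _example_threshold_overlap_check():
    resultVol = np.zeros((5, 5, 3))
    resultVol[2, 2, 1] = 0.9            # one strong detection voxel
    synapsevol = np.zeros((5, 5, 3))
    synapsevol[1:3, 1:3, 1] = 1         # annotation mask nearby
    SE = np.ones((2, 2, 2))
    detected = []
    for thresh in (0.5, 0.95):
        dilated_volume = ndimage.binary_dilation(resultVol > thresh, SE)
        detected.append((dilated_volume + synapsevol).max() > 1)
    return detected                     # expected: [True, False]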
def getSynapseDetectionSSA(synapticVolumes, query, kernelLength=2, edge_win = 3,
search_win = 2):
"""
This function calls the functions needed to run probabilistic synapse detection
Parameters
----------
synapticVolumes : dict
has two keys (presynaptic,postsynaptic) which contain lists of 3D numpy arrays
query : dict
        contains the minimum slice information for each channel
kernelLength : int
Minimum 2D Blob Size (default 2)
edge_win: int
        Edge window (default 3)
search_win: int
Search windows (default 2)
Returns
----------
resultVol : 3D numpy array - final probability map
"""
# Data
presynapticVolumes = synapticVolumes['presynaptic']
postsynapticVolumes = synapticVolumes['postsynaptic']
# Number of slices each blob should span
preIF_z = query['preIF_z']
postIF_z = query['postIF_z']
for n in range(0, len(presynapticVolumes)):
#presynapticVolumes[n] = getProbMap(presynapticVolumes[n]) # Step 1
presynapticVolumes[n] = syn.convolveVolume(presynapticVolumes[n], kernelLength) # Step 2
if preIF_z[n] > 1:
factorVol = syn.computeFactor(presynapticVolumes[n], int(preIF_z[n])) # Step 3
presynapticVolumes[n] = presynapticVolumes[n] * factorVol
for n in range(0, len(postsynapticVolumes)):
#postsynapticVolumes[n] = getProbMap(postsynapticVolumes[n]) # Step 1
postsynapticVolumes[n] = syn.convolveVolume(postsynapticVolumes[n], kernelLength) # Step 2
if postIF_z[n] > 1:
factorVol = syn.computeFactor(postsynapticVolumes[n], int(postIF_z[n])) # Step 3
postsynapticVolumes[n] = postsynapticVolumes[n] * factorVol
# combinePrePostVolumes(base, adjacent)
# Step 4
#print(len(presynapticVolumes))
#print(len(postsynapticVolumes))
if len(postsynapticVolumes) == 0:
resultVol = syn.combinePrePostVolumes(presynapticVolumes, postsynapticVolumes, edge_win, search_win)
else:
resultVol = syn.combinePrePostVolumes(postsynapticVolumes, presynapticVolumes, edge_win, search_win)
    return resultVol
def createSynapseVolumesCutout(query, anno, win_xy, win_z, filepath):
"""
Load tiff stacks associated with a query
Parameters
----------
query : dict - object containing filenames associated with pre/post synaptic markers
filepath : str - location of data
Returns
----------
synapticVolumes : dict
dict with two (pre/post) lists of synaptic volumes
"""
bbox = synaptogram.getAnnotationBoundingBox2(anno)
bbox = synaptogram.transformSynapseCoordinates(bbox)
# query = {'preIF' : preIF, 'preIF_z' : preIF_z, 'postIF' : postIF, 'postIF_z' : postIF_z};
#presynaptic volumes
presynapticvolumes = []
preIF = query['preIF']
# Loop over every presynaptic channel
for n in range(0, len(preIF)):
#print(preIF[n])
volume = getCutoutProbVolume(bbox, win_xy, win_z, preIF[n], filepath)
presynapticvolumes.append(volume)
#postsynaptic volumes
postsynapticvolumes = []
postIF = query['postIF']
# Loop over every postsynaptic channel
for n in range(0, len(postIF)):
# print(postIF[n])
volume = getCutoutProbVolume(bbox, win_xy, win_z, postIF[n], filepath)
postsynapticvolumes.append(volume)
synapticVolumes = {'presynaptic': presynapticvolumes,
'postsynaptic': postsynapticvolumes}
return synapticVolumes
def getCutoutProbVolume(bboxCoordinates, win_xy, win_z, volname, filepath):
"""
Load a portion of image data
Parameters
-----------
    bboxCoordinates : dict - coordinates of EM annotation
win_xy : int - radius of expansion
win_z : int - z radius of expansion
volname : str - name of volume to load
filepath : str - location of data
Returns
-----------
vol : 3D Numpy array
"""
# check for boundary issues
    startZ = bboxCoordinates['minZ']
    if startZ - win_z > -1:
        startZ = startZ - win_z
    endZ = bboxCoordinates['maxZ']
    if endZ + win_z < 50:
        endZ = endZ + win_z
    # get range of x, y values
    startX = bboxCoordinates['minX'] - win_xy
    startY = bboxCoordinates['minY'] - win_xy
    deltaX = bboxCoordinates['maxX'] - startX + win_xy
    deltaY = bboxCoordinates['maxY'] - startY + win_xy
startX = int(round(startX))
startY = int(round(startY))
deltaX = int(round(deltaX))
deltaY = int(round(deltaY))
startZ = int(round(startZ))
endZ = int(round(endZ))
numSlices = endZ - startZ + 1
# allocate volume
vol = np.zeros((deltaY, deltaX, numSlices), dtype=np.float64)
# iterate over each slice
sliceitr = 0
for sliceInd in range(startZ, endZ + 1):
cutout = synaptogram.getImageProbMapCutoutFromFile(volname, sliceInd, startX, startY, deltaX, deltaY, filepath)
        vol[:, :, sliceitr] = cutout
sliceitr = sliceitr + 1
return vol
def annotationToBinaryVolume(shape, anno, expandedBox, bbox, zrange):
"""
    Convert an annotation to a binary volume that matches the cutout size used
    in checkQueryAgainstAnno()
    Parameters:
    -------------
    shape : tuple - size of the output volume
    anno : dict - manual synapse annotation
    expandedBox : dict - annotation bounding box expanded by win_xy/win_z
    bbox : dict - annotation bounding box (transformed coordinates)
    zrange : list - z indices spanned by the expanded bounding box
    Returns:
    -------------
    synapsevol : 3D Numpy array
"""
synapsevol = np.zeros(shape)
synapseOutlinesDict = synaptogram.getAnnotationOutlines(anno)
synapseOutlinesDict = synaptogram.transformSynapseOutlinesDict(synapseOutlinesDict)
listOfZinds = synapseOutlinesDict['zInds']
listOfZinds = sorted(listOfZinds)
startX = expandedBox['startX']
startY = expandedBox['startY']
minX = math.floor(bbox['minX'] - startX)
maxX = math.ceil(bbox['maxX'] - startX)
minY = math.floor(bbox['minY'] - startY)
maxY = math.ceil(bbox['maxY'] - startY)
offsetZ = 0
for localZ, globalZ in enumerate(zrange):
if zrange[localZ] == listOfZinds[offsetZ]:
synapsevol[minY:maxY, minX:maxX, localZ] = 1
#print('actual range')
#print(minY, maxY, minX, maxX,localZ)
offsetZ = offsetZ + 1
if offsetZ == len(listOfZinds):
break
return synapsevol
def getSynapseDetectionsMW(synapticVolumes, query, kernelLength=2, edge_win = 3,
search_win = 2):
"""
This function calls the functions needed to run probabilistic synapse detection
Parameters
----------
synapticVolumes : dict
has two keys (presynaptic,postsynaptic) which contain lists of 3D numpy arrays
query : dict
        contains the minimum slice information for each channel
kernelLength : int
Minimum 2D Blob Size (default 2)
edge_win: int
        Edge window (default 3)
search_win: int
Search windows (default 2)
Returns
----------
resultVol : 3D numpy array - final probability map
"""
# Data
presynapticVolumes = synapticVolumes['presynaptic']
postsynapticVolumes = synapticVolumes['postsynaptic']
# Number of slices each blob should span
preIF_z = query['preIF_z']
postIF_z = query['postIF_z']
for n in range(0, len(presynapticVolumes)):
presynapticVolumes[n] = syn.getProbMap_MW(presynapticVolumes[n], query['preIF'][n]) # Step 1
presynapticVolumes[n] = syn.convolveVolume(presynapticVolumes[n], kernelLength) # Step 2
if preIF_z[n] > 1:
factorVol = syn.computeFactor(presynapticVolumes[n], int(preIF_z[n])) # Step 3
presynapticVolumes[n] = presynapticVolumes[n] * factorVol
for n in range(0, len(postsynapticVolumes)):
postsynapticVolumes[n] = syn.getProbMap_MW(postsynapticVolumes[n], query['postIF'][n]) # Step 1
postsynapticVolumes[n] = syn.convolveVolume(postsynapticVolumes[n], kernelLength) # Step 2
if postIF_z[n] > 1:
factorVol = syn.computeFactor(postsynapticVolumes[n], int(postIF_z[n])) # Step 3
postsynapticVolumes[n] = postsynapticVolumes[n] * factorVol
# combinePrePostVolumes(base, adjacent)
# Step 4
#print(len(presynapticVolumes))
#print(len(postsynapticVolumes))
if len(postsynapticVolumes) == 0:
resultVol = syn.combinePrePostVolumes(presynapticVolumes, postsynapticVolumes, edge_win, search_win)
else:
resultVol = syn.combinePrePostVolumes(postsynapticVolumes, presynapticVolumes, edge_win, search_win)
    return resultVol
def getProbMap_MW(data, chname, win=30, stepsize=1):
"""
    Returns probability map of input image. Uses a moving window for background/foreground separation
Saves probability map to file; if the file already exists, it reloads it and returns it
Issue: Current output location is hard coded
Parameters
----------
data : 3D numpy - input volume
chname : str - channel name
win : int - window size (default = 30)
stepsize : int - moving window step size (default = 1)
Returns
----------
data : 3D numpy
output volume with values scaled between 0 to 1
"""
#test to see if data exists # FIX filepath
outputlocation = '/Users/anish/Documents/Connectome/Synaptome-Duke/data/collman17/Site3Align2Stacks/'
fn = chname + '_probvol.npy'
fn = os.path.join(outputlocation, fn)
doesfileexist = os.path.exists(fn)
if doesfileexist:
outputvol = np.load(fn)
print("loaded file")
return outputvol
else:
print("compute prob file")
outputvol = np.zeros(data.shape)
for zInd in range(0, data.shape[2]):
img = data[:, :, zInd]
print("Calculating probability slice: ", zInd)
imgsize = img.shape
outputimg = np.zeros(imgsize)
startRow = 0
endRow = 0
oldEndRow = 0
exitRowLoop = False
for rowstep in range(0, int(np.ceil(imgsize[0]/stepsize))):
startCol = 0
endCol = 0
oldEndCol = 0
if exitRowLoop:
break
if ((startRow + win) < imgsize[0]):
endRow = startRow + win
else:
endRow = imgsize[0]
exitRowLoop = True
exitColLoop = False
for colstep in range(0, int(np.ceil(imgsize[1]/stepsize))):
if exitColLoop:
break
if ((startCol + win) < imgsize[1]):
endCol = startCol + win
else:
endCol = imgsize[1]
exitColLoop = True
cutout = img[startRow:endRow, startCol:endCol]
cutout = scipy.stats.norm.cdf(cutout, np.mean(cutout), np.std(cutout))
if oldEndRow != 0 and oldEndCol != 0:
priordata = outputimg[startRow:oldEndRow, startCol:oldEndCol]
meancutout = np.mean([priordata, cutout[0:-(endRow-oldEndRow), 0:-(endCol-oldEndCol)]], 0)
cutout[0:-(endRow-oldEndRow), 0:-(endCol-oldEndCol)] = meancutout
outputimg[startRow:endRow, startCol:endCol] = cutout
oldEndCol = endCol
startCol = startCol + stepsize
oldEndRow = endRow
startRow = startRow + stepsize
outputvol[:, :, zInd] = outputimg
np.save(fn, outputvol)
return outputvol
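# Hedged illustration (not used by the pipeline): the heart of getProbMap_MW is
# rescaling each local window with a Gaussian CDF fitted to that window's own
# mean and standard deviation; a single-window version of that step is:
def _example_window_cdf(img, row, col, win=30):
    cutout = img[row:row + win, col:col + win]
    return scipy.stats.norm.cdf(cutout, np.mean(cutout), np.std(cutout))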
def loadSynapseDataFromQuery(query, anno, win_xy, win_z, filepath):
"""
Load tiff stacks associated with a query
Parameters
----------
query : dict - object containing filenames associated with pre/post synaptic markers
filepath : str - location of data
Returns
----------
synapticVolumes : dict
dict with two (pre/post) lists of synaptic volumes
"""
bbox = synaptogram.get_anno_boundingbox(anno)
bbox = synaptogram.transformSynapseCoordinates(bbox)
# query = {'preIF' : preIF, 'preIF_z' : preIF_z, 'postIF' : postIF, 'postIF_z' : postIF_z};
#presynaptic volumes
presynapticvolumes = []
preIF = query['preIF']
# Loop over every presynaptic channel
for n in range(0, len(preIF)):
#print(preIF[n])
volume = getVolume(bbox, win_xy, win_z, preIF[n], filepath)
presynapticvolumes.append(volume)
#postsynaptic volumes
postsynapticvolumes = []
postIF = query['postIF']
# Loop over every postsynaptic channel
for n in range(0, len(postIF)):
# print(postIF[n])
volume = getVolume(bbox, win_xy, win_z, postIF[n], filepath)
postsynapticvolumes.append(volume)
synapticVolumes = {'presynaptic': presynapticvolumes,
'postsynaptic': postsynapticvolumes}
return synapticVolumes
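# Hedged illustration (channel names invented, not taken from any real query
# file): the query dict used throughout this module pairs each pre/postsynaptic
# channel name with the number of slices a blob is expected to span, roughly:
def _example_query():
    return {'preIF': ['synapsin'], 'preIF_z': [2],
            'postIF': ['PSD95'], 'postIF_z': [2]}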
def getVolume(bboxCoordinates, win_xy, win_z, volname, filepath):
"""
Load a portion of image data
Parameters
-----------
    bboxCoordinates : dict - coordinates of EM annotation
win_xy : int - radius of expansion
win_z : int - z radius of expansion
volname : str - name of volume to load
filepath : str - location of data
Returns
-----------
vol : 3D Numpy array
"""
# check for boundary issues
    startZ = bboxCoordinates['minZ']
    if startZ - win_z > -1:
        startZ = startZ - win_z
    endZ = bboxCoordinates['maxZ']
    if endZ + win_z < 50:
        endZ = endZ + win_z
    # get range of x, y values
    startX = bboxCoordinates['minX'] - win_xy
    startY = bboxCoordinates['minY'] - win_xy
    deltaX = bboxCoordinates['maxX'] - startX + win_xy
    deltaY = bboxCoordinates['maxY'] - startY + win_xy
startX = int(round(startX))
startY = int(round(startY))
deltaX = int(round(deltaX))
deltaY = int(round(deltaY))
startZ = int(round(startZ))
endZ = int(round(endZ))
numSlices = endZ - startZ + 1
# allocate volume
vol =
|
np.zeros((deltaY, deltaX, numSlices), dtype=np.float64)
|
numpy.zeros
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import binned_statistic, sem
import matplotlib as mpl
mpl.rcParams['mathtext.fontset'] = 'cm'
colors = [color['color'] for color in list(plt.rcParams['axes.prop_cycle'])]
import sys
# compute fast/slow state index
def calc_timings(data):
index_high = []
index_low = []
temp_index_high = []
temp_index_low = []
num_bound_high = []
bound = False
for i, (time, step, out, number_bound, number_tot) in enumerate(data):
num_bound_high.append(number_tot)
if number_bound == 0:
temp_index_high.append(i)
if not bound and i != 0:
temp_index_low.append(i)
index_low.append(temp_index_low)
temp_index_low = []
bound=True
else:
temp_index_low.append(i)
if bound:
temp_index_high.append(i)
index_high.append(temp_index_high)
temp_index_high = []
bound=False
if bound:
temp_index_high.append(i)
index_high.append(temp_index_high)
temp_index_high = []
else:
temp_index_low.append(i)
index_low.append(temp_index_low)
temp_index_low = []
return index_high, index_low, num_bound_high
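# Hedged example (synthetic rows, not simulation output): each row follows the
# (time, step, out, number_bound, number_tot) layout of the data files loaded
# below; calc_timings splits the trace into unbound ("high") and bound ("low")
# stretches of row indices.
def _example_calc_timings():
    data = np.array([
        [0.0, 0, 1, 0, 3],
        [0.1, 1, 0, 0, 3],
        [0.2, 2, 1, 0, 3],
        [0.3, 3, 0, 1, 3],
        [0.4, 4, 1, 1, 3],
    ])
    index_high, index_low, _ = calc_timings(data)
    return index_high, index_low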
kOff=0.121224
R=0.1
kA = 1.0/R
kOn = kA * kOff
dx=7.0
D0=270000.0
Dhop = 2.0*D0/(dx*dx)
fractionFree = 1.0/(1.0 + kA)
khop = Dhop/fractionFree
Ron = kOn/khop
pscale = np.sqrt((kOn + kOff)/khop) * (1.0 + 1.0/kA)
tau_high_list = []
tau_low_list = []
flux_high_list = []
flux_low_list = []
prob_list = []
n_high_list = []
n_low_list = []
flux_eqn1 = []
flux_eqn2 = []
phi_slow = []
phi_free = []
tau_slow = []
tau_free = []
width_list = [2,4,8,16,32,64,128,256,512,1024]
for id,width in enumerate(width_list):
data = np.loadtxt('../Sim/Data/TimeSeries_Width'+str(width)+'R'+str(R)+'koff'+str(kOff)+'ID'+str(id))
time, step, out, number_bound, number_total = list(data.T)
index_high,index_low, num_bound_high = calc_timings(data)
total_time = time[-1] - time[0]
nu_s = 0.55
n_plug = (width-1)/2.0
n_bound = 1 + nu_s * n_plug/(1 + 1.0/kA)
phi_slow.append( (1.0 + n_plug) / (n_bound/kOff) )
phi_free.append(khop/2.0/width)
n_free = (width+1)/2.0
tau_slow.append(n_bound/kOff)
tau_free.append(1.0/(kOn * n_free))
flux_eqn1.append(2.0*D0/(dx**2)*pscale/width/np.sqrt(3) * (np.arctan(1 + 2.0/pscale) - np.arctan(1) ) )
tau_high = np.array(list(map(lambda x: time[x][-1] - time[x][0], index_high)))
tau_low = np.array(list(map(lambda x: time[x][-1] - time[x][0], index_low)))
n_high = np.array(list(map(lambda x: out[x].sum(), index_high)))
n_low = np.array(list(map(lambda x: out[x].sum(), index_low)))
n_high = n_high[tau_high > 0]
n_low = n_low[tau_low > 0]
tau_high = tau_high[tau_high > 0]
tau_low = tau_low[tau_low > 0]
flux_high = (n_high / tau_high)
flux_low = (n_low / tau_low)
# duration weighted fluxes
flux_high_av = np.nansum(flux_high * tau_high) / np.nansum(tau_high)
flux_low_av =
|
np.nansum(flux_low * tau_low)
|
numpy.nansum
|
"""
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/pulsar/polyco.py,v 1.18 2017/03/03 20:15:14 kerrm Exp $
Mange polycos from tempo2.
Authors: <NAME> <<EMAIL>>
<NAME> <<EMAIL>>
"""
from __future__ import division
import math
import numpy as np
import datetime
import os
import subprocess
class PolycoEntry:
STATIC_COUNTER = 0
def __init__(self,tmid,mjdspan,rphase,f0,ncoeff,coeffs,obs):
self.tmid = tmid
self.mjdspan = mjdspan
self.tstart = tmid - float(mjdspan)/2
self.tstop = tmid + float(mjdspan)/2
self.rphase = rphase
self.f0 = f0
self.ncoeff = ncoeff
self.coeffs = coeffs
self.obs = obs
self.uid = PolycoEntry.STATIC_COUNTER
PolycoEntry.STATIC_COUNTER += 1
def __str__(self):
return("PE: "+repr(self.tmid)+" "+repr(self.mjdspan)+" "+repr(self.rphase)+" "+repr(self.ncoeff)+" "+repr(self.coeffs))
def valid(self,t):
'''Return True if this polyco entry is valid for the time given (MJD)'''
return t>=(self.tmid-self.mjdspan/2.0) and t<(self.tmid+self.mjdspan/2.0)
def evalphase(self,t):
'''Return the phase at time t, computed with this polyco entry'''
dt = (t-self.tmid)*1440.0
# Compute polynomial by factoring out the dt's
phase = self.coeffs[self.ncoeff-1]
for i in range(self.ncoeff-2,-1,-1):
phase = self.coeffs[i] + dt*phase
# Add DC term
phase += self.rphase + dt*60.0*self.f0
phase -= math.floor(phase)
if phase < 0.0 or phase >= 1.0:
print ("BAD PHASE ",phase)
return(phase)
def evalabsphase(self,t):
"""Return the phase at time t, computed with this polyco entry.
This version includes the "DC" term, i.e. is the absolute phase
since the epoch."""
dt = (t-self.tmid)*1440.0
# Compute polynomial by factoring out the dt's
phase = self.coeffs[self.ncoeff-1]
for i in range(self.ncoeff-2,-1,-1):
phase = self.coeffs[i] + dt*phase
# Add DC term
phase += self.rphase + dt*60.0*self.f0
return(phase)
def evalfreq(self,t):
'''Return the freq at time t, computed with this polyco entry'''
dt = (t-self.tmid)*1440.0
s = 0.0
for i in range(1,self.ncoeff):
s += float(i) * self.coeffs[i] * dt**(i-1)
freq = self.f0 + s/60.0
return(freq)
def evalfreqderiv(self,t):
""" Return the frequency derivative at time t."""
dt = (t-self.tmid)*1440.0
s = 0.0
for i in range(2,self.ncoeff):
s += float(i) * float(i-1) * self.coeffs[i] * dt**(i-2)
freqd = s/(60.0*60.0)
return(freqd)
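# Hedged sketch (toy coefficients only, not a real ephemeris): a PolycoEntry
# evaluates phase via a Horner-style polynomial in dt (minutes from tmid) on
# top of the rphase + dt*60*f0 term, e.g.:
def _example_polyco_entry():
    coeffs = np.array([0.0, 1e-3, 1e-6])       # invented polynomial coefficients
    pe = PolycoEntry(tmid=55000.0, mjdspan=1.0, rphase=0.25, f0=29.9,
                     ncoeff=3, coeffs=coeffs, obs='coe')
    t = 55000.01                                # MJD inside the validity window
    return pe.valid(t), pe.evalphase(t), pe.evalfreq(t)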
class Polyco:
def __init__(self, fname, psrname=None, recalc_polycos=True,
mjd0=51544,bary=False, working_dir=None, output=None, ndays=None,
verbose=False):
""" Create an object encapsulating a set of polynomial coefficients for evaluating phase.
fname -- either an existing polyco .dat file or an ephemeris
with which to generate the polycos
recalc_polycos -- force generation of polycos; fname must be an
ephemeris in this case
mjd0 -- start of polyco validity; default Jan 1, 2000
bary -- generate polycos at barycenter if set (default geo)
working_dir -- change to this directory to generate polycos
output -- use this stem to prepend to polyco .dat files
ndays -- number of days to include; default spans 2000 to
present but can be lengthy to compute
verbose -- spit out more diagnostic output
"""
self.binary_period = None
self.bary = bary
self.working_dir = working_dir
self.output = output
self.verbose = verbose
if fname.endswith( ".par" ) or recalc_polycos:
from uw.pulsar.parfiles import ParFile
pf = ParFile(fname)
self.ra = pf.get_ra()
self.dec = pf.get_dec()
self.binary_period = pf.get_binary_period()
fname = self.gen_polycos(fname,recalc_polycos=recalc_polycos,mjd0=mjd0,ndays=ndays)
else:
self.ra = self.dec = None
self.entries = []
f = open(fname,"r")
set = 0
while True:
line1 = f.readline()
if len(line1) == 0:
break
sp = line1.split()
psrname = sp[0].strip()
date = sp[1].strip()
utc = sp[2]
tmid = float(sp[3])
dm = float(sp[4])
#doppler = float(sp[5])
logrms = float(sp[6])
if verbose:
print ("- - - - - - -")
#print ("psrname %s date %s utc %s tmid %s dm %f doppler %f logrms %f" % (psrname,date,utc,tmid,dm,doppler,logrms))
print ("psrname %s date %s utc %s tmid %s dm %f logrms %f" % (psrname,date,utc,tmid,dm,logrms))
line2 = f.readline()
rphase = float(line2[0:20])
f0 = float(line2[20:38])
obs = line2[38:43].strip()
nspan = int(line2[43:49])
mjdspan = float(nspan)/(60*24)
ncoeff = int(line2[49:54])
obsfreq = float(line2[54:64])
if len(line2[75:80].strip()) > 0:
binphase = float(line2[75:80])
else:
binphase = 0.0
if verbose:
print ("rphase %s f0 %s obs %s ncoeff %d nspan %d obsfreq %f binphase %f" % (repr(rphase),repr(f0),obs,ncoeff,nspan,obsfreq,binphase))
nlines = ncoeff//3
nlast = ncoeff%3
if nlast > 0:
nlines += 1
coeffs = []
for i in range(nlines):
line = f.readline()
for c in line.split():
coeffs.append(float(c))
coeffs = np.array(coeffs)
if verbose:
print ("COEFFS: ",coeffs)
pe = PolycoEntry(tmid,mjdspan,rphase,f0,ncoeff,coeffs,obs)
self.entries.append(pe)
if len(self.entries)==0:
raise ValueError('No polycos generated!')
self.make_keys()
def gen_polycos(self,polyconame,recalc_polycos=True,mjd0=51544,ndays=None):
"""If par file passed in, generate polyco file on the fly."""
# get MJDs
if ndays is None:
nDays=(datetime.date.today()-datetime.date(2000,1,1)).days+(51544-mjd0)
else:
nDays = ndays
endMJD=mjd0+nDays+2
if (endMJD-mjd0) < 2:
raise ValueError('Unacceptable MJD bounds.')
if self.verbose:
print ("MJD limits: %s %s"%(str(mjd0),str(endMJD)))
curdir = os.getcwd()
if self.working_dir is not None:
os.chdir(self.working_dir)
prefix = self.output or ''
if recalc_polycos:
fnames = ['%s%s'%(prefix,x) for x in
['polyco.tim','polyco_new.dat','newpolyco.dat']]
            # use an explicit loop: in Python 3 a bare map() is lazy and would
            # never actually delete the stale polyco files
            for stale in filter(os.path.isfile, fnames):
                os.remove(stale)
obs_string = '@' if self.bary else 'coe'
out_string = '' if self.output is None else ' -polyco_file %s'%self.output
# generate a 12th order polynomial over 360 minutes unless this
# is short compared to the binary period
if (self.binary_period is not None) and (self.binary_period < 0.5):
minutes = max(60,int(self.binary_period*24*20))
else:
minutes = 360
t2cmd = 'tempo2 -f %s%s -polyco "%s %s %d 12 12 %s 0 0\"'%(
polyconame,out_string,mjd0,endMJD,minutes,obs_string)
if self.verbose:
print ('Creating polycos with command:\n',t2cmd)
o = subprocess.check_output(t2cmd,shell=True)
if self.verbose:
print (o)
fname = '%spolyco_new.dat'%(prefix)
polyconame=os.path.abspath(fname)
DEVNULL = open(os.devnull,'wb')
subprocess.call('rm %snewpolyco.dat polyco.tim'%(prefix),
shell=True,stderr=DEVNULL)
os.chdir(curdir)
return polyconame
def make_keys(self):
"""Keys for a binary search. Use the edges."""
keys = np.asarray([e.tstop for e in self.entries])
sorting = np.argsort(keys)
self.entries = np.asarray(self.entries)[sorting]
self.keys = np.append(self.entries[0].tstart,keys[sorting])
def getentry(self,t,use_keys=True):
'''Returns the polyco entry corresponding to time t (in MJD)'''
if use_keys:
idx = np.searchsorted(self.keys,t)
if np.any(idx == len(self.keys)):
print ('The following MJDS were beyond the end of the polyco validity (%s):'%(self.keys[-1]))
print (t[idx == len(self.keys)] if type(t) is type(
|
np.array([1])
|
numpy.array
|
import numpy as np
from text_selection.greedy.greedy_iterator import get_indices_with_most_new
def test_4x3_componenttest():
covered_counts = np.array(
[0, 1, 1]
)
data = np.array([
[0, 1, 1],
[1, 1, 0],
[0, 1, 0],
[1, 1, 1],
])
result = get_indices_with_most_new(data, covered_counts)
np.testing.assert_array_equal(result,
|
np.array([1, 3])
|
numpy.array
|
import cv2
import numpy
class EdgeSegmentator:
def __init__(self, distancePrecision, angularPrecision, minimumThreshold):
self.distancePrecision = distancePrecision
self.angularPrecision = angularPrecision
self.minimumThreshold = minimumThreshold
def __call__(self, frame):
return cv2.HoughLinesP(
frame,
self.distancePrecision,
self.angularPrecision,
self.minimumThreshold,
|
numpy.array([])
|
numpy.array
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# use THEANO_FLAGS='device=cuda,floatX=float32' or
# set these options in the .theanorc file’s [global] section
import numpy
import theano
import theano.tensor as T
rng = numpy.random
N = 400
feats = 784
D = (rng.randn(N, feats).astype(theano.config.floatX),
rng.randint(size=N,low=0, high=2).astype(theano.config.floatX))
training_steps = 10000
# Declare Theano symbolic variables
x = T.matrix("x")
y = T.vector("y")
w = theano.shared(rng.randn(feats).astype(theano.config.floatX), name="w")
b = theano.shared(
|
numpy.asarray(0., dtype=theano.config.floatX)
|
numpy.asarray
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# <NAME> — July 2016
""" test the MPC toolbox
"""
from __future__ import division, print_function, unicode_literals
from nose.tools import assert_true, assert_equal
from numpy.testing import assert_allclose
import numpy as np
def assert_allclose9(a,b):
return assert_allclose(a, b, 1e-9, 1e-9)
def assert_allclose6(a,b):
return assert_allclose(a, b, 1e-6, 1e-6)
import dmpc
def test_dynamics_from_thermal():
dyn = dmpc.dynamics.from_thermal(5, 1, dt=0.1)
assert_equal(dyn.A, 0.98)
assert_equal(dyn.Bu, 0.1)
assert_equal(dyn.Bp, 0.02)
assert_equal(dyn.C, 1)
dyn2 = dmpc.dynamics.from_thermal([5,5], [1,1], dt=0.1)
I2 = np.identity(2)
assert_allclose9(dyn2.A, 0.98*I2)
assert_allclose9(dyn2.Bu, 0.1*I2)
assert_allclose9(dyn2.Bp, 0.02*I2)
assert_allclose9(dyn2.C, I2)
def test_block_toeplitz():
from dmpc.mat_help import block_toeplitz
assert_allclose9(
block_toeplitz([1,2,3], [1,4,5,6]),
np.array([[1, 4, 5, 6],
[2, 1, 4, 5],
[3, 2, 1, 4]])
)
I2 = np.eye(2)
assert_allclose9(
block_toeplitz([1*I2,2*I2,3*I2], [1*I2,4*I2,5*I2,6*I2]),
np.array([[1, 0, 4, 0, 5, 0, 6, 0],
[0, 1, 0, 4, 0, 5, 0, 6],
[2, 0, 1, 0, 4, 0, 5, 0],
[0, 2, 0, 1, 0, 4, 0, 5],
[3, 0, 2, 0, 1, 0, 4, 0],
[0, 3, 0, 2, 0, 1, 0, 4]])
)
assert_allclose9(
block_toeplitz([1*I2,2*I2,3*I2], [1,4,5,6]),
np.array([[1, 0, 4, 4, 5, 5, 6, 6],
[0, 1, 4, 4, 5, 5, 6, 6],
[2, 0, 1, 0, 4, 4, 5, 5],
[0, 2, 0, 1, 4, 4, 5, 5],
[3, 0, 2, 0, 1, 0, 4, 4],
[0, 3, 0, 2, 0, 1, 4, 4]])
)
def get_dyn(c_th):
'''creates a LinDyn of a thermal system'''
r_th = 20
dt = 0.2 #h
dyn = dmpc.dynamics.from_thermal(r_th, c_th, dt, "thermal subsys")
return dyn, dt
def test_pred_mat():
'''test prediction matrices on a 2D thermal system'''
dyn, dt = get_dyn(c_th = 0.02) # 0.4 h time constant
n_hor = int(2.5/dt)
assert n_hor == 12
t = np.arange(1, n_hor+1)*dt
F, Hu, Hp = dmpc.pred_mat(n_hor, dyn.A, dyn.C, dyn.Bu, dyn.Bp)
zn = np.zeros(n_hor)[:,None]
T_ext_hor = 2 + zn # °C
u_hor = 0 + zn # kW
u_hor[t>1] = 1 #kW
T0 = 20 # °C
T_hor = np.dot(F, T0) +
|
np.dot(Hu, u_hor)
|
numpy.dot
|
import numpy as np
import xarray as xr
import logging
import warnings
import oggm
import copy
from oggm import entity_task
from oggm.core.flowline import FileModel
from oggm.exceptions import InvalidWorkflowError
# import the MBsandbox modules
from MBsandbox.mbmod_daily_oneflowline import TIModel, TIModel_Sfc_Type, RandomMassBalance_TIModel
from MBsandbox.mbmod_daily_oneflowline import (MultipleFlowlineMassBalance_TIModel,
ConstantMassBalance_TIModel,
AvgClimateMassBalance_TIModel)
from oggm.core.flowline import flowline_model_run
from oggm.core.massbalance import ConstantMassBalance
from oggm import cfg, utils
from oggm.exceptions import InvalidParamsError
log = logging.getLogger(__name__)
### maybe these won't be necessary if the OGGM core flowline run_from_climate_data
# and run_from_constant_data are flexible enough to use another MultipleFlowlineMassBalance
# model ...
# # do it similar as in run_from_climate_data()
@entity_task(log)
def run_from_climate_data_TIModel(gdir, ys=None, ye=None, min_ys=None,
max_ys=None,
store_monthly_step=False,
climate_filename='climate_historical',
climate_type='',
climate_input_filesuffix='',
output_filesuffix='',
init_model_filesuffix=None,
init_model_yr=None,
init_model_fls=None,
zero_initial_glacier=False,
bias=0,
melt_f=None,
precipitation_factor=None,
temperature_bias=None,
mb_type='mb_monthly', grad_type='cte',
mb_model_sub_class=TIModel,
kwargs_for_TIModel_Sfc_Type={},
reset=True,
**kwargs):
""" Runs a glacier with climate input from e.g. W5E5 or a GCM.
This will initialize a
:py:class:`MBsandbox.MultipleFlowlineMassBalance_TIModel`,
and run a :py:func:`oggm.core.flowline.flowline_model_run`.
same as in run_from_climate_data but compatible with TIModel
Parameters:
----------------------------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
ys : int
start year of the model run (default: from the glacier geometry
date if init_model_filesuffix is None, else init_model_yr)
ye : int
end year of the model run (default: last year of the provided
climate file)
min_ys : int
if you want to impose a minimum start year, regardless if the glacier
inventory date is earlier (e.g. if climate data does not reach).
max_ys : int
if you want to impose a maximum start year, regardless if the glacier
inventory date is later (e.g. if climate data does not reach).
store_monthly_step : bool
whether to store the diagnostic data at a monthly time step or not
(default is yearly)
#TODO: should this be included?
#store_model_geometry : bool
# whether to store the full model geometry run file to disk or not.
# (new in OGGM v1.4.1: default is to follow
# cfg.PARAMS['store_model_geometry'])
climate_filename : str
name of the climate file, e.g. 'climate_historical' (default) or
'gcm_data'
climate_type : str
if we use 'gcm_data', this is the climate calibration dataset
        (e.g. 'W5E5' or 'WFDE5_CRU')
if this is empty, the climate_input_filesuffix is used
climate_input_filesuffix: str
filesuffix for the input climate file,
if we use 'climate_historical', this can be e.g. 'W5E5' or 'WFDE5_CRU',
if we use 'gcm_data', it can be 'ISIMIP3b_ensemble_ssp'
output_filesuffix : str
for the output file
init_model_filesuffix : str
if you want to start from a previous model run state. Can be
combined with `init_model_yr`
init_model_yr : int
the year of the initial run you want to start from. The default
is to take the last year of the simulation.
init_model_fls : []
list of flowlines to use to initialise the model (the default is the
present_time_glacier file from the glacier directory).
Ignored if `init_model_filesuffix` is set
zero_initial_glacier : bool
if true, the ice thickness is set to zero before the simulation
bias : float
equal to the residual in TIModel, best is to leave it at 0 !
melt_f:
calibrated melt_f (float) or 'from_json', then the saved json
file from the right prcp-fac and climate is opened and that melt_f is chosen
temperature_bias : float
add a bias to the temperature timeseries
precipitation_factor: float
multiply a factor to the precipitation time series
use the value from the calibration!
kwargs : dict
kwargs to pass to the FluxBasedModel instance
"""
if climate_type == '':
climate_type = climate_input_filesuffix
if init_model_filesuffix is not None:
fp = gdir.get_filepath('model_geometry',
filesuffix=init_model_filesuffix)
fmod = FileModel(fp)
if init_model_yr is None:
init_model_yr = fmod.last_yr
fmod.run_until(init_model_yr)
init_model_fls = fmod.fls
if ys is None:
ys = init_model_yr
# Take from rgi date if not set yet
if ys is None:
try:
ys = gdir.rgi_date.year
except AttributeError:
ys = gdir.rgi_date
# The RGI timestamp is in calendar date - we convert to hydro date,
# i.e. 2003 becomes 2004 (so that we don't count the MB year 2003
# in the simulation)
# See also: https://github.com/OGGM/oggm/issues/1020
ys += 1
# Final crop
if min_ys is not None:
ys = ys if ys > min_ys else min_ys
if max_ys is not None:
ys = ys if ys < max_ys else max_ys
if melt_f == 'from_json':
fs = '_{}_{}_{}'.format(climate_type, mb_type, grad_type)
d = gdir.read_json(filename='melt_f_geod', filesuffix=fs)
# get the calibrated melt_f that suits to the prcp factor
try:
melt_f_chosen = d['melt_f_pf_{}'.format(np.round(precipitation_factor, 2))]
# get the corrected ref_hgt so that we can apply this again on the mb model
# if otherwise not melt_f could be found!
ref_hgt_calib_diff = d['ref_hgt_calib_diff']
        except KeyError:
raise InvalidWorkflowError('there is no calibrated melt_f for this precipitation factor, glacier, climate'
'mb_type and grad_type, need to run first melt_f_calib_geod_prep_inversion'
'with these options!')
#pd_inv_melt_f = pd.read_csv(melt_f_file, index_col='RGIId')
#melt_f_chosen = pd_inv_melt_f['melt_f_opt'].loc[gdir.rgi_id]
# use same pf as from initialisation and calibration
#np.testing.assert_allclose(precipitation_factor, pd_inv_melt_f['pf'])
else:
melt_f_chosen = melt_f
mb = MultipleFlowlineMassBalance_TIModel(gdir, mb_model_class=mb_model_sub_class,
prcp_fac=precipitation_factor,
melt_f=melt_f_chosen,
filename=climate_filename,
bias=bias,
input_filesuffix=climate_input_filesuffix,
mb_type=mb_type,
grad_type=grad_type,
# check_calib_params=check_calib_params,
**kwargs_for_TIModel_Sfc_Type)
# if temperature_bias is not None:
# mb.temp_bias = temperature_bias
if precipitation_factor is not None:
mb.prcp_fac = precipitation_factor
if temperature_bias is not None:
mb.temp_bias = temperature_bias
if melt_f == 'from_json':
# instead of the quality check we corrected the height already inside of
# melt_f_calib_geod_prep_inversion if no suitable melt_f was found
# let's just check if this has worked
np.testing.assert_allclose(ref_hgt_calib_diff,
mb.flowline_mb_models[-1].ref_hgt - mb.flowline_mb_models[-1].uncorrected_ref_hgt)
else:
# do the quality check!
mb.flowline_mb_models[-1].historical_climate_qc_mod(gdir)
if ye is None:
# Decide from climate (we can run the last year with data as well)
ye = mb.flowline_mb_models[0].ye + 1
#if isinstance(mb_model_sub_class, TIModel_Sfc_Type):
if init_model_fls is None:
fls = gdir.read_pickle('model_flowlines')
else:
fls = copy.deepcopy(init_model_fls)
if reset and mb_model_sub_class == TIModel_Sfc_Type:
mb.flowline_mb_models[-1].reset_pd_mb_bucket(init_model_fls = fls)
return flowline_model_run(gdir, output_filesuffix=output_filesuffix,
mb_model=mb, ys=ys, ye=ye,
store_monthly_step=store_monthly_step,
init_model_fls=init_model_fls,
zero_initial_glacier=zero_initial_glacier,
**kwargs)
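# Hedged illustration (file contents invented): with melt_f='from_json' the
# calibrated melt_f is looked up under a key built from the rounded
# precipitation factor, so the json written by the calibration step is assumed
# to look roughly like this:
def _example_melt_f_json_lookup(precipitation_factor=2.5):
    d = {'melt_f_pf_2.5': 180.0, 'ref_hgt_calib_diff': 0.0}   # invented values
    return d['melt_f_pf_{}'.format(np.round(precipitation_factor, 2))]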
@entity_task(log)
def run_random_climate_TIModel(gdir, nyears=1000, y0=None, halfsize=15,
mb_model_sub_class=TIModel,
temperature_bias=None,
mb_type='mb_monthly', grad_type='cte',
bias=0, seed=None,
melt_f=None,
precipitation_factor=None,
store_monthly_step=False,
store_model_geometry=None,
climate_filename='climate_historical',
climate_type='',
climate_input_filesuffix='',
output_filesuffix='', init_model_fls=None,
zero_initial_glacier=False,
unique_samples=False, #melt_f_file=None,
reset = True,
kwargs_for_TIModel_Sfc_Type={},
**kwargs):
"""Runs the random mass-balance model for a given number of years.
copy of run_random_climate --> needs to be tested ...
This will initialize a
:py:class:`MBsandbox.MultipleFlowlineMassBalance_TIModel`,
and run a :py:func:`oggm.core.flowline.flowline_model_run`.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
nyears : int
length of the simulation
y0 : int, optional
central year of the random climate period. The default is to be
centred on t*.
halfsize : int, optional
the half-size of the time window (window size = 2 * halfsize + 1)
bias : float
equal to the residual in TIModel, best is to leave it at 0 !
seed : int
seed for the random generator. If you ignore this, the runs will be
different each time. Setting it to a fixed seed across glaciers can
be useful if you want to have the same climate years for all of them
store_monthly_step : bool
whether to store the diagnostic data at a monthly time step or not
(default is yearly)
#TODO: should this be included?
#store_model_geometry : bool
# whether to store the full model geometry run file to disk or not.
# (new in OGGM v1.4.1: default is to follow
# cfg.PARAMS['store_model_geometry'])
climate_filename : str
name of the climate file, e.g. 'climate_historical' (default) or
'gcm_data'
climate_type : str
if we use 'gcm_data', this is the climate calibration dataset
        (e.g. 'W5E5' or 'WFDE5_CRU')
if this is empty, the climate_input_filesuffix is used
climate_input_filesuffix: str
filesuffix for the input climate file,
if we use 'climate_historical', this can be e.g. 'W5E5' or 'WFDE5_CRU',
if we use 'gcm_data', it can be 'ISIMIP3b_ensemble_ssp'
output_filesuffix : str
for the output file
init_model_filesuffix : str
if you want to start from a previous model run state. Can be
combined with `init_model_yr`
init_model_yr : int
the year of the initial run you want to start from. The default
is to take the last year of the simulation.
init_model_fls : []
list of flowlines to use to initialise the model (the default is the
present_time_glacier file from the glacier directory).
Ignored if `init_model_filesuffix` is set
zero_initial_glacier : bool
if true, the ice thickness is set to zero before the simulation
melt_f:
calibrated melt_f (float) or 'from_json', then the saved json
file from the right prcp-fac and climate is opened and that melt_f is chosen
temperature_bias : float
add a bias to the temperature timeseries
precipitation_factor: float
multiply a factor to the precipitation time series
use the value from the calibration!
unique_samples: bool
if true, chosen random mass-balance years will only be available once
per random climate period-length
if false, every model year will be chosen from the random climate
period with the same probability
kwargs : dict
kwargs to pass to the FluxBasedModel instance
"""
if climate_type == '':
climate_type = climate_input_filesuffix
if melt_f == 'from_json':
fs = '_{}_{}_{}'.format(climate_type, mb_type, grad_type)
d = gdir.read_json(filename='melt_f_geod', filesuffix=fs)
# get the calibrated melt_f that suits to the prcp factor
try:
melt_f_chosen = d['melt_f_pf_{}'.format(np.round(precipitation_factor, 2))]
ref_hgt_calib_diff = d['ref_hgt_calib_diff']
        except KeyError:
raise InvalidWorkflowError('there is no calibrated melt_f for this precipitation factor, glacier, climate'
'mb_type and grad_type, need to run first melt_f_calib_geod_prep_inversion'
'with these options!')
# old method: use csv file to get the calibrated melt_f
#pd_inv_melt_f = pd.read_csv(melt_f_file, index_col='RGIId')
#melt_f_chosen = pd_inv_melt_f['melt_f_opt'].loc[gdir.rgi_id]
# use same pf as from initialisation and calibration
#np.testing.assert_allclose(precipitation_factor, pd_inv_melt_f['pf'])
else:
melt_f_chosen = melt_f
mb = MultipleFlowlineMassBalance_TIModel(gdir,
mb_model_class=RandomMassBalance_TIModel,
y0=y0, halfsize=halfsize,
melt_f=melt_f_chosen,
prcp_fac=precipitation_factor,
mb_type=mb_type,
grad_type=grad_type,
bias = bias,
seed=seed,
mb_model_sub_class = mb_model_sub_class,
filename=climate_filename,
input_filesuffix=climate_input_filesuffix,
unique_samples=unique_samples,
**kwargs_for_TIModel_Sfc_Type)
if precipitation_factor is not None:
mb.prcp_fac = precipitation_factor
if temperature_bias is not None:
mb.temp_bias = temperature_bias
if melt_f == 'from_json':
# instead of the quality check we corrected the height already inside of
# melt_f_calib_geod_prep_inversion if no suitable melt_f was found
# let's just check if this has worked
np.testing.assert_allclose(ref_hgt_calib_diff,
mb.flowline_mb_models[-1].mbmod.ref_hgt - mb.flowline_mb_models[-1].mbmod.uncorrected_ref_hgt)
else:
# do the quality check!
mb.flowline_mb_models[-1].historical_climate_qc_mod(gdir)
if init_model_fls is None:
fls = gdir.read_pickle('model_flowlines')
else:
fls = copy.deepcopy(init_model_fls)
if reset and mb_model_sub_class == TIModel_Sfc_Type:
mb.flowline_mb_models[-1].mbmod.reset_pd_mb_bucket(init_model_fls = fls)
# do once the spinup manually but then not again
if mb_model_sub_class == TIModel_Sfc_Type:
spinup_yrs = kwargs_for_TIModel_Sfc_Type['spinup_yrs']
mb.flowline_mb_models[-1].mbmod.get_specific_mb(year=np.arange(y0-halfsize-spinup_yrs,
y0-halfsize),
fls=fls)
# spinup is done, now set the spinup_yrs to zero for the actual run!!!
mb.flowline_mb_models[-1].mbmod.spinup_yrs = 0
return flowline_model_run(gdir, output_filesuffix=output_filesuffix,
mb_model=mb, ys=0, ye=nyears,
store_monthly_step=store_monthly_step,
store_model_geometry=store_model_geometry,
init_model_fls=init_model_fls,
zero_initial_glacier=zero_initial_glacier,
**kwargs)
# work in Process:
# problem: don't have a constant mb TIModel, this would be quite a lot of work ...
# not yet adapted at all, first need a new ConstantMbModel_TIModel!!
@entity_task(log)
def run_constant_climate_TIModel(gdir, nyears=1000, y0=None, halfsize=15,
bias=None, temperature_bias=None,
precipitation_factor=None,
mb_type='mb_monthly', grad_type='cte',
melt_f=None,
store_monthly_step=False,
store_model_geometry=None,
init_model_filesuffix=None,
init_model_yr=None,
output_filesuffix='',
climate_filename='climate_historical',
climate_type='',
climate_input_filesuffix='',
mb_model_sub_class=TIModel,
init_model_fls=None,
zero_initial_glacier=False,
kwargs_for_TIModel_Sfc_Type = {},
reset = True,
interpolation_optim=False,
use_avg_climate=False,
**kwargs):
"""Runs the constant mass-balance model of the TIModel
for a given number of years.
This is equivalent to run_constant_climate but is compatible with TIModel
This will initialize a
:py:class:`oggm.core.massbalance.MultipleFlowlineMassBalance_TIModel`,
and run a :py:func:`oggm.core.flowline.flowline_model_run`.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
nyears : int
length of the simulation (default: as long as needed for reaching
equilibrium)
y0 : int
central year of the requested climate period. The default is to be
centred on t*.
halfsize : int, optional
the half-size of the time window (window size = 2 * halfsize + 1)
bias : float
bias of the mb model. Default is to use the calibrated one, which
is often a better idea. For t* experiments it can be useful to set it
to zero
temperature_bias : float
add a bias to the temperature timeseries
precipitation_factor: float
multiply a factor to the precipitation time series
default is None and means that the precipitation factor from the
calibration is applied which is cfg.PARAMS['prcp_scaling_factor']
store_monthly_step : bool
whether to store the diagnostic data at a monthly time step or not
(default is yearly)
store_model_geometry : bool
whether to store the full model geometry run file to disk or not.
(new in OGGM v1.4.1: default is to follow
cfg.PARAMS['store_model_geometry'])
init_model_filesuffix : str
if you want to start from a previous model run state. Can be
combined with `init_model_yr`
init_model_yr : int
the year of the initial run you want to start from. The default
is to take the last year of the simulation.
climate_filename : str
name of the climate file, e.g. 'climate_historical' (default) or
'gcm_data'
climate_type : str
if we use 'gcm_data', this is the climate calibration dataset
        (e.g. 'W5E5' or 'WFDE5_CRU')
if this is empty, the climate_input_filesuffix is used
climate_input_filesuffix: str
filesuffix for the input climate file,
if we use 'climate_historical', this can be e.g. 'W5E5' or 'WFDE5_CRU',
if we use 'gcm_data', it can be 'ISIMIP3b_ensemble_ssp'
output_filesuffix : str
this add a suffix to the output file (useful to avoid overwriting
previous experiments)
mb_model_sub_class : class
which child class of TIModel_Parent should be used, either TIModel (default)
or TIModel_Sfc_Type
zero_initial_glacier : bool
if true, the ice thickness is set to zero before the simulation
init_model_fls : []
list of flowlines to use to initialise the model (the default is the
present_time_glacier file from the glacier directory)
kwargs_for_TIModel_Sfc_Type : dict
default is empty dictionary, kwargs to pass to the TIModel_Sfc_Type instance,
to change these params: melt_f_ratio_snow_to_ice, melt_f_update, spinup_yrs,
tau_e_fold_yr, melt_f_change; if mb_model_sub_class is TIModel, this should be
an empty dict!
kwargs : dict
kwargs to pass to the FluxBasedModel instance
"""
if climate_type == '':
climate_type = climate_input_filesuffix
if init_model_filesuffix is not None:
fp = gdir.get_filepath('model_geometry',
filesuffix=init_model_filesuffix)
fmod = FileModel(fp)
if init_model_yr is None:
init_model_yr = fmod.last_yr
fmod.run_until(init_model_yr)
init_model_fls = fmod.fls
if melt_f == 'from_json':
fs = '_{}_{}_{}'.format(climate_type, mb_type, grad_type)
d = gdir.read_json(filename='melt_f_geod', filesuffix=fs)
# get the calibrated melt_f that suits to the prcp factor
try:
melt_f_chosen = d['melt_f_pf_{}'.format(np.round(precipitation_factor, 2))]
ref_hgt_calib_diff = d['ref_hgt_calib_diff']
        except KeyError:
raise InvalidWorkflowError('there is no calibrated melt_f for this precipitation factor, glacier, climate'
'mb_type and grad_type, need to run first melt_f_calib_geod_prep_inversion'
'with these options!')
#pd_inv_melt_f = pd.read_csv(melt_f_file, index_col='RGIId')
#melt_f_chosen = pd_inv_melt_f['melt_f_opt'].loc[gdir.rgi_id]
# use same pf as from initialisation and calibration
#np.testing.assert_allclose(precipitation_factor, pd_inv_melt_f['pf'])
else:
melt_f_chosen = melt_f
if mb_model_sub_class == TIModel and kwargs_for_TIModel_Sfc_Type != {}:
raise InvalidWorkflowError('if mb_model_sub_class is TIModel,'
' this should be an empty dict!')
if use_avg_climate:
mb_sub_model_class = AvgClimateMassBalance_TIModel
else:
mb_sub_model_class = ConstantMassBalance_TIModel
mb = MultipleFlowlineMassBalance_TIModel(gdir,
mb_model_class=mb_sub_model_class,
y0=y0, halfsize=halfsize,
bias=bias,
melt_f=melt_f_chosen,
prcp_fac=precipitation_factor,
mb_type=mb_type,
grad_type=grad_type,
filename=climate_filename,
input_filesuffix=climate_input_filesuffix,
mb_model_sub_class=mb_model_sub_class,
interpolation_optim=interpolation_optim,
**kwargs_for_TIModel_Sfc_Type)
if precipitation_factor is not None:
mb.prcp_fac = precipitation_factor
if temperature_bias is not None:
mb.temp_bias = temperature_bias
if melt_f == 'from_json':
# instead of the quality check we corrected the height already inside of
# melt_f_calib_geod_prep_inversion if no suitable melt_f was found
        # let's just check if this has worked
np.testing.assert_allclose(ref_hgt_calib_diff,
mb.flowline_mb_models[-1].mbmod.ref_hgt - mb.flowline_mb_models[
-1].mbmod.uncorrected_ref_hgt)
else:
# do the quality check!
mb.flowline_mb_models[-1].historical_climate_qc_mod(gdir)
if init_model_fls is None:
fls = gdir.read_pickle('model_flowlines')
else:
fls = copy.deepcopy(init_model_fls)
if reset and mb_model_sub_class == TIModel_Sfc_Type:
mb.flowline_mb_models[-1].reset_pd_mb_bucket(init_model_fls = fls)
return flowline_model_run(gdir, output_filesuffix=output_filesuffix,
mb_model=mb, ys=0, ye=nyears,
store_monthly_step=store_monthly_step,
store_model_geometry=store_model_geometry,
init_model_fls=init_model_fls,
zero_initial_glacier=zero_initial_glacier,
**kwargs)
@entity_task(log)
def run_with_hydro_daily(gdir, run_task=None, ref_area_from_y0=False, Testing=False, **kwargs):
"""Run the flowline model and add hydro diagnostics on daily basis (experimental!).
Parameters
----------
run_task : func
any of the `run_*`` tasks in the MBSandbox.flowline_TIModel module.
The mass-balance model used needs to have the `add_climate` output
kwarg available though.
ref_area_from_y0 : bool
the hydrological output is computed over a reference area, which
per default is the largest area covered by the glacier in the simulation
period. Use this kwarg to force a specific area to the state of the
glacier at the provided simulation year.
    Testing : bool
        if set to True, the 29th of February is set to NaN values in non-leap
        years, so that the remaining days are at the same index in non-leap and
        leap years; if set to False, the last (366th) day in non-leap years is
        set to zero
**kwargs : all valid kwargs for ``run_task``
"""
# Make sure it'll return something
kwargs['return_value'] = True
# Check that kwargs are compatible
if kwargs.get('store_monthly_step', False):
raise InvalidParamsError('run_with_hydro only compatible with '
'store_monthly_step=False.')
if kwargs.get('mb_elev_feedback', 'annual') != 'annual':
raise InvalidParamsError('run_with_hydro_daily only compatible with '
"mb_elev_feedback='annual' (yes, even "
"when asked for monthly hydro output).")
out = run_task(gdir, **kwargs)
if out is None:
raise InvalidWorkflowError('The run task ({}) did not run '
'successfully.'.format(run_task.__name__))
# Mass balance model used during the run
mb_mod = out.mb_model
# Glacier geometry during the run
suffix = kwargs.get('output_filesuffix', '')
# We start by fetching mass balance data and geometry for all years
# model_geometry files always retrieve yearly timesteps
fmod = FileModel(gdir.get_filepath('model_geometry', filesuffix=suffix))
# The last one is the final state - we can't compute MB for that
years = fmod.years[:-1]
# Geometry at y0 to start with + off-glacier snow bucket
bin_area_2ds = []
bin_elev_2ds = []
ref_areas = []
snow_buckets = []
for fl in fmod.fls:
# Glacier area on bins
bin_area = fl.bin_area_m2
ref_areas.append(bin_area)
# snow_buckets.append(bin_area * 0)
# snow_buckets.append(np.zeros(len(bin_area)))
snow_buckets.append(np.zeros(len(bin_area)))
# Output 2d data
shape = len(years), len(bin_area)
bin_area_2ds.append(
|
np.empty(shape, np.float64)
|
numpy.empty
|
"""Force Mapping Utilities"""
import numpy as np
import sharpy.utils.algebra as algebra
def aero2struct_force_mapping(aero_forces,
struct2aero_mapping,
zeta,
pos_def,
psi_def,
master,
conn,
cag=np.eye(3),
aero_dict=None):
r"""
Maps the aerodynamic forces at the lattice to the structural nodes
The aerodynamic forces from the UVLM are always in the inertial ``G`` frame of reference and have to be transformed
to the body or local ``B`` frame of reference in which the structural forces are defined.
Since the structural nodes and aerodynamic panels are coincident in a spanwise direction, the aerodynamic forces
that correspond to a structural node are the summation of the ``M+1`` forces defined at the lattice at that
spanwise location.
.. math::
\mathbf{f}_{struct}^B &= \sum\limits_{i=0}^{m+1}C^{BG}\mathbf{f}_{i,aero}^G \\
\mathbf{m}_{struct}^B &= \sum\limits_{i=0}^{m+1}C^{BG}(\mathbf{m}_{i,aero}^G +
\tilde{\boldsymbol{\zeta}}^G\mathbf{f}_{i, aero}^G)
where :math:`\tilde{\boldsymbol{\zeta}}^G` is the skew-symmetric matrix of the vector between the lattice
grid vertex and the structural node.
Args:
aero_forces (list): Aerodynamic forces from the UVLM in inertial frame of reference
struct2aero_mapping (dict): Structural to aerodynamic node mapping
zeta (list): Aerodynamic grid coordinates
pos_def (np.ndarray): Vector of structural node displacements
psi_def (np.ndarray): Vector of structural node rotations (CRVs)
master: Unused
conn (np.ndarray): Connectivities matrix
cag (np.ndarray): Transformation matrix between inertial and body-attached reference ``A``
aero_dict (dict): Dictionary containing the grid's information.
Returns:
np.ndarray: structural forces in an ``n_node x 6`` vector
"""
n_node, _ = pos_def.shape
n_elem, _, _ = psi_def.shape
struct_forces = np.zeros((n_node, 6))
nodes = []
for i_elem in range(n_elem):
for i_local_node in range(3):
i_global_node = conn[i_elem, i_local_node]
if i_global_node in nodes:
continue
nodes.append(i_global_node)
for mapping in struct2aero_mapping[i_global_node]:
i_surf = mapping['i_surf']
i_n = mapping['i_n']
_, n_m, _ = aero_forces[i_surf].shape
crv = psi_def[i_elem, i_local_node, :]
cab = algebra.crv2rotation(crv)
cbg = np.dot(cab.T, cag)
for i_m in range(n_m):
chi_g = zeta[i_surf][:, i_m, i_n] - np.dot(cag.T, pos_def[i_global_node, :])
struct_forces[i_global_node, 0:3] += np.dot(cbg, aero_forces[i_surf][0:3, i_m, i_n])
struct_forces[i_global_node, 3:6] += np.dot(cbg, aero_forces[i_surf][3:6, i_m, i_n])
struct_forces[i_global_node, 3:6] += np.dot(cbg, algebra.cross3(chi_g, aero_forces[i_surf][0:3, i_m, i_n]))
return struct_forces
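# Hedged numeric illustration (all values invented): the skew-symmetric term in
# the docstring, zeta_tilde^G applied to f^G, is just the cross product of the
# lever arm between a lattice vertex and its structural node with the force at
# that vertex, rotated into B:
def _example_moment_transfer():
    chi_g = np.array([0.1, 0.0, 0.0])   # lever arm in the G frame
    f_g = np.array([0.0, 10.0, 0.0])    # force at the lattice vertex, G frame
    cbg = np.eye(3)                      # identity rotation for simplicity
    return cbg.dot(np.cross(chi_g, f_g))  # -> array([0., 0., 1.])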
def total_forces_moments(forces_nodes_a,
pos_def,
ref_pos=np.array([0., 0., 0.])):
"""
Performs a summation of the forces and moments expressed at the structural nodes in the A frame of reference.
Note:
If you need to transform forces and moments at the nodes from B to A, use the
:func:`~sharpy.structure.models.beam.Beam.nodal_b_for_2_a_for()` function.
Args:
forces_nodes_a (np.array): ``n_node x 6`` vector of forces and moments at the nodes in A
pos_def (np.array): ``n_node x 3`` vector of nodal positions in A
ref_pos (np.array (optional)): Location in A about which to compute moments. Defaults to ``[0, 0, 0]``
Returns:
np.array: Vector of length 6 containing the total forces and moments expressed in A at the desired location.
"""
num_node = pos_def.shape[0]
ra_vec = pos_def - ref_pos
total_forces = np.zeros(3)
total_moments =
|
np.zeros(3)
|
numpy.zeros
|
import numpy as np
def Ber(p, size=1):
return np.random.binomial(1,p, size=size)
class Users():
def __init__(self, T, l, a, b, eps, tau, n, m, sizes, means):
self.a = a
self.b = b
self.T = T
self.eps = eps
self.l = l
self.cV = np.zeros((n,))
self.cE = np.zeros((n,))
self.tau = tau
self.n = n
assert n == sum(sizes), f"group sizes should add up to {n}"
self.m = m
self.sizes = sizes
self.means = means
self.est_last = np.zeros(n, dtype='float32')
self.est_curr = np.zeros(n, dtype='float32')
def get_true_avg(self):
return sum([s*m for s,m in zip(self.sizes, self.means)]) / self.n
def groups(self):
for i,s in enumerate(self.sizes):
beg = sum(self.sizes[:i])
group_idx = slice(beg, beg+s)
yield i, group_idx
def local_estimate(self):
for i,idx in self.groups():
self.est_curr[idx] = np.random.binomial(self.l,self.means[i], size=self.sizes[i])/self.l
return self.est_curr
def vote(self, t, debug=True):
log_T = int(np.floor(np.log(self.T)))
diff = np.abs(self.local_estimate() - self.est_last)
print(f"diff max: {diff.max():.4f}\ndiff mean: {diff.mean():.4f}")
b_star = np.c_[[b*(diff > self.tau(b)) for b in range(-1, log_T+1)]].max(axis=0)
if debug:
print(f"b*={b_star.mean()}")
VoteYes = (self.cV < self.eps/4) &
|
np.logical_not(t % 2**(log_T - b_star))
|
numpy.logical_not
|
# generic libraries
import numpy as np
from scipy import ndimage, linalg
from scipy.interpolate import griddata
def conv_2Dfilter(I, kernel):
    sub_shape = tuple(np.subtract(I.shape, kernel.shape) + 1)  # 'valid' output shape
view_shape = tuple(np.subtract(I.shape, sub_shape) + 1) + sub_shape
strides = I.strides + I.strides
sub_matrices = np.lib.stride_tricks.as_strided(I, view_shape, strides)
V = np.einsum('ij,ijkl->kl', kernel, sub_matrices)
return V
# def conv_2Dfft(): todo: https://gist.github.com/thearn/5424195
def toeplitz_block(kernel, image_size):
# from https://stackoverflow.com/questions/56702873/is-there-an-function-in-pytorch-for-converting-convolutions-to-fully-connected-n
# admin
k_h, k_w = kernel.shape
i_h, i_w = image_size
o_h, o_w = i_h-k_h+1, i_w-k_w+1
# construct 1d conv toeplitz matrices for each row of the kernel
toeplitz = []
for r in range(k_h):
toeplitz.append(linalg.toeplitz(c=(kernel[r,0], *np.zeros(i_w-k_w)),
r=(*kernel[r], *np.zeros(i_w-k_w))) )
# construct toeplitz matrix of toeplitz matrices (just for padding=0)
h_blocks, w_blocks = o_h, i_h
h_block, w_block = toeplitz[0].shape
W_conv = np.zeros((h_blocks, h_block, w_blocks, w_block))
for i, B in enumerate(toeplitz):
for j in range(o_h):
W_conv[j, :, i+j, :] = B
W_conv.shape = (h_blocks*h_block, w_blocks*w_block)
return W_conv
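# Added note: W = toeplitz_block(K, I.shape) is built so that W @ I.ravel()
# should reproduce the flattened 'valid'-mode cross-correlation of I with K
# (no kernel flip, no padding); nan_resistant_conv2 below relies on exactly
# this matrix form to re-weight rows that touch NaN pixels.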
def nan_resistant_conv2(I, kernel, size='same', cval=np.nan):
""" estimates a convolution on a corrupted array, making use of the energy
perserving principle, see [1].
Parameters
----------
I : np.array, size=(m,n)
array with intensity value
kernel : np.array, size=(k,l)
kernel to be used for the convolution
size : {'same','smaller'}
output size; for 'same' the border is padded with ``cval``
cval : float
constant value used to pad the border
Returns
-------
U : np.array
convolution result; same spatial size as I when size='same'
References
----------
.. [1] Altena et al. "Correlation dispersion as a measure to better estimate
uncertainty of remotely sensed glacier displacements" The cryosphere
"""
m,n = I.shape
k,l = kernel.shape
# set the stage
C = np.isnan(I.flatten()) # classification
A = toeplitz_block(kernel, I.shape).T # design matrix
for i in range(A.shape[1]):
if np.sum(A[:,i]) != np.sum(A[C,i]):
idx = np.where(A[:,i]!=0)[0]
val, sat = np.squeeze(A[idx,i]), C[idx]
if np.all(~sat): # all NaN's
A[idx,:] = 0
else:
# redistribute the energy to comply with the closed-system
# constraint, based on the relative weights
surplus = np.sum(val[sat]) # energy to distribute
contrib = surplus*(val[~sat]/np.sum(val[~sat]))
A[idx[~sat],i] += contrib
# estimate
I = I.flatten()
I[C] = 0
U = np.dot(A.T,I)
# reorganize
pad_size = (k//2, l//2)
U = U.reshape(m-2*pad_size[0],n-2*pad_size[1])
if size in ['same']:
U = np.pad(U, ((pad_size[0],pad_size[0]),(pad_size[1],pad_size[1])),
mode='constant', constant_values=cval)
return U
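# Illustrative example (added; the values are hypothetical):
#   >>> I = np.outer(np.arange(5.), np.ones(5))
#   >>> I[2, 2] = np.nan
#   >>> U = nan_resistant_conv2(I, np.ones((3, 3)) / 9., size='same')
# The NaN pixel is excluded and its kernel weight is redistributed over the
# remaining pixels of each window, so every output value stays a properly
# weighted average of the valid pixels.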
def nan_resistant_diff2(I, kernel, size='same', cval=np.nan):
""" estimates a differential convolution on a corrupted array, making use
of the energy perserving principle, see [1].
Parameters
----------
I : np.array, size=(m,n)
array with intensity value
kernel : np.array, size=(k,l)
kernel to be used for the convolution
size : {'same','smaller'}
output size; for 'same' the border is padded with ``cval``
cval : float
constant value used to pad the border
Returns
-------
U : np.array
differential convolution result; same spatial size as I when size='same'
References
----------
.. [1] Altena et al. "Correlation dispersion as a measure to better estimate
uncertainty of remotely sensed glacier displacements" The cryosphere
"""
m,n = I.shape
k,l = kernel.shape
# set the stage
C = np.isnan(I.flatten()) # classification
A = toeplitz_block(kernel, I.shape).T # design matrix
for i in range(A.shape[1]):
if np.sum(A[:,i]) != np.sum(A[C,i]):
idx = np.where(A[:,i]!=0)[0]
val = np.squeeze(A[idx,i])
sat = C[idx]
# redistribute the energy to comply with the closed-system constraint
surplus = np.sum(val[sat]) # energy to distribute
posse = np.sign(val)
if (np.sign(surplus)==-1) and np.any(posse==+1):
verdict = +1
elif (np.sign(surplus)==+1) and np.any(posse==-1):
verdict = -1
else:
verdict = 0
if verdict==0:
A[:,i] = 0
else:
idx_sub = idx[np.logical_and(posse==verdict, ~sat)]
contrib = np.divide(surplus, idx_sub.size)
A[idx_sub,i] += contrib
# estimate
I = I.flatten()
I[C] = 0
U = np.dot(A.T,I)
# reorganize
pad_size = (k//2, l//2)
U = U.reshape(m-2*pad_size[0],n-2*pad_size[1])
if size in ['same']:
U = np.pad(U, ((pad_size[0],pad_size[0]),(pad_size[1],pad_size[1])),
mode='constant', constant_values=cval)
return U
def select_boi_from_stack(I, boi):
""" give array with selection given by a pointing array
Parameters
----------
I : np.array, size=(m,n,b), ndim={2,3}
data array.
boi : np.array, size=(k,1), dtype=integer
array giving the bands of interest.
Returns
-------
I_new : np.array, size=(m,n,k), ndim={2,3}
selection of bands, in the order given by boi.
"""
assert type(I)==np.ndarray, ("please provide an array")
assert type(boi)==np.ndarray, ("please provide an array")
if boi.shape[0]>0:
if I.ndim>2:
ndim = I.shape[2]
assert(ndim>=np.max(boi)) # boi pointer should not exceed bands
I_new = I[:,:,boi]
else:
I_new = I
else:
I_new = I
return I_new
def get_image_subset(I, bbox):
""" get subset of an image specified by the bounding box extents
Parameters
----------
I : np.array, size=(m,n), ndim={2,3,4}
data array
bbox : np.array, size=(1,4)
[minimum row, maximum row, minimum column, maximum column].
Returns
-------
sub_I : np.array, size=(k,l), ndim={2,3,4}
sub set of the data array.
"""
assert type(I)==np.ndarray, ("please provide an array")
assert(bbox[0]<=bbox[1])
assert(bbox[2]<=bbox[3])
if I.ndim==2:
sub_I = I[bbox[0]:bbox[1],bbox[2]:bbox[3]]
elif I.ndim==3:
sub_I = I[bbox[0]:bbox[1],bbox[2]:bbox[3],:]
elif I.ndim==4:
sub_I = I[bbox[0]:bbox[1],bbox[2]:bbox[3],:,:]
return sub_I
def bilinear_interpolation(I, di, dj):
""" do bilinear interpolation of an image at different locations
Parameters
----------
I : np.array, size=(m,n), ndim={2,3}
data array.
di :
* np.array, size=(k,l), ndim=2
vertical locations, within local image frame.
* float
uniform vertical displacement
dj :
* np.array, size=(k,l), ndim=2
horizontal locations, within local image frame.
* float
uniform horizontal displacement
Returns
-------
I_new : np.array, size=(k,l), ndim=2, dtype={float,complex}
interpolated values.
See Also
--------
.mapping_tools.bilinear_interp_excluding_nodat
Notes
-----
Two different coordinate system are used here:
.. code-block:: text
indexing | indexing ^ y
system 'ij'| system 'xy' |
| |
| j | x
--------+--------> --------+-------->
| |
| |
image | i map |
based v based |
"""
assert type(I)==np.ndarray, ("please provide an array")
if isinstance(dj, (float, int)): # scalar value: uniform displacement over the whole grid
mI, nI = I.shape[0], I.shape[1]
(dj, di) = np.meshgrid(
|
np.linspace(0, nI-1, nI)
|
numpy.linspace
|
import numpy as np
import util
class TestSGD:
def test_rosen(self):
util.assert_progress(
*util.build_rosen('sgd'),
monitor_gradients=True)
def test_factor(self):
util.assert_progress(
*util.build_factor('sgd'),
max_gradient_elem=1,
nesterov=False)
def test_factor_nesterov(self):
util.assert_progress(
*util.build_factor('sgd'),
max_gradient_norm=1)
def test_default_params(self):
opt, data = util.build_rosen('sgd')
for _ in opt.iterate(data):
assert opt.nesterov is False
assert np.allclose(opt.learning_rate.eval(), 1e-4)
assert np.allclose(opt.momentum, 0)
assert np.allclose(opt.patience, 5)
assert np.allclose(opt.min_improvement, 0)
assert np.allclose(opt.max_gradient_norm, 0)
assert np.allclose(opt.max_gradient_elem, 0)
break
def test_params(self):
opt, data = util.build_rosen('sgd')
for _ in opt.iterate(data,
learning_rate=0.3,
momentum=10,
patience=20,
min_improvement=0.1,
max_gradient_elem=4,
max_gradient_norm=5,
nesterov=True):
assert opt.nesterov is True
assert np.allclose(opt.learning_rate.eval(), 0.3)
assert np.allclose(opt.momentum, 10)
assert np.allclose(opt.patience, 20)
assert np.allclose(opt.min_improvement, 0.1)
assert np.allclose(opt.max_gradient_norm, 5)
assert np.allclose(opt.max_gradient_elem, 4)
break
class TestNAG:
def test_rosen(self):
util.assert_progress(*util.build_rosen('nag'))
def test_factor(self):
util.assert_progress(*util.build_factor('nag'), max_gradient_elem=1)
def test_default_params(self):
opt, data = util.build_rosen('nag')
for _ in opt.iterate(data):
assert opt.nesterov is True
assert np.allclose(opt.learning_rate.eval(), 1e-4)
assert np.allclose(opt.momentum, 0)
assert np.allclose(opt.patience, 5)
assert np.allclose(opt.min_improvement, 0)
assert np.allclose(opt.max_gradient_norm, 0)
assert
|
np.allclose(opt.max_gradient_elem, 0)
|
numpy.allclose
|
import os
import struct
from numpy import array, float32
import numpy as np
from pyV3D.sender import WV_Sender
class STLGeometryObject(object):
'''This is an object that follows the IStaticGeometry interface.
'''
def __init__(self, filename):
self.filename = filename
self.geom_name = os.path.basename(filename)[:-4]
def get_visualization_data(self, wv, *args, **kwargs):
'''Load a tesselation from a geometry model.
wv: WV_Wrapper instance
The pyV3D WV_Wrapper object
'''
# Determine if we are binary or ascii
binary = False
with open(self.filename, 'rU') as stl:
for line in stl:
line = line.strip()
if not line:
continue
fields = line.split()
if fields[0] != 'solid':  # note: ('solid') is a plain string, not a tuple
binary = True
break
if binary:
with open(self.filename, 'rb') as stl:
self._load_binary(wv, stl)
else:
with open(self.filename, 'rU') as stl:
self._load_ascii(wv, stl)
def _load_ascii(self, wv, stl):
'''Load from ascii STL file.'''
vertices = []
normals = []
nsolid = 0
for line in stl:
line = line.strip()
if not line:
continue
fields = line.split()
if fields[0] in ('solid', 'outer',
'endloop', 'endfacet'):
continue
elif fields[0] == 'facet':
# Replicate normal for each vertex.
normal = [float(xyz) for xyz in fields[2:]]
normals.extend(normal)
normals.extend(normal)
normals.extend(normal)
elif fields[0] == 'vertex':
vertices.extend([float(xyz) for xyz in fields[1:]])
# Finish with this solid and prepare for next one.
elif fields[0] == 'endsolid':
ntri = len(vertices) // 3
nsolid += 1
wv.set_face_data(points=np.array(vertices, dtype=np.float32),
tris=np.array(range(1, ntri+1), dtype=np.int32),
normals=np.array(normals, dtype=np.float32),
bbox=self._get_bbox(ntri/3, vertices),
name="%s_solid%d"%(self.geom_name, nsolid))
normals = []
vertices = []
def _load_binary(self, wv, stl):
'''Load from binary STL file.'''
BINARY_HEADER ="80sI"
BINARY_FACET = "12fH"
vertices = []
normals = []
header, ntri = struct.unpack(BINARY_HEADER, stl.read(84))
def remove_non_ascii(s):
return "".join([i for i in s if ord(i)<128])
header = remove_non_ascii(header)
for i in range(0, ntri):
facet = struct.unpack(BINARY_FACET, stl.read(50))
normal = [float(xyz) for xyz in facet[0:3]]
normals.extend(normal)
normals.extend(normal)
normals.extend(normal)
vertices.extend([float(xyz) for xyz in facet[3:12]])
wv.set_face_data(points=np.array(vertices, dtype=np.float32),
tris=np.array(range(1, 3*ntri+1), dtype=np.int32),
colors=None,
normals=
|
np.array(normals, dtype=np.float32)
|
numpy.array
|
import numpy as np
import threading
import queue
try:
import rasterio
except ImportError:
rasterio = None
try:
import affine
except ImportError:
affine = None
try:
import scipy
except ImportError:
scipy = None
class Raster:
"""
The Raster object is used for cropping, sampling raster values,
and re-sampling raster values to grids, and provides methods to
plot rasters and histograms of raster digital numbers for visualization
and analysis purposes.
Parameters
----------
array : np.ndarray
a three dimensional array of raster values with dimensions
defined by (raster band, nrow, ncol)
bands : tuple
a tuple of raster bands
crs : int, string, rasterio.crs.CRS object
either a epsg code, a proj4 string, or a CRS object
transform : affine.Affine object
affine object, which is used to define geometry
nodataval : float
raster no data value
rio_ds : DatasetReader object
rasterIO dataset Reader object
Notes
-----
Examples
--------
>>> from flopy.utils import Raster
>>>
>>> rio = Raster.load("myraster.tif")
"""
FLOAT32 = (float, np.float32, np.float_)
FLOAT64 = (np.float64,)
INT8 = (np.int8, np.uint8)
INT16 = (np.int16, np.uint16)
INT32 = (int, np.int32, np.uint32, np.uint, np.uintc)
INT64 = (np.int64, np.uint64)
def __init__(
self,
array,
bands,
crs,
transform,
nodataval,
driver="GTiff",
rio_ds=None,
):
if rasterio is None:
msg = (
"Raster(): error "
+ 'importing rasterio - try "pip install rasterio"'
)
raise ImportError(msg)
else:
from rasterio.crs import CRS
if affine is None:
msg = (
"Raster(): error "
+ 'importing affine - try "pip install affine"'
)
raise ImportError(msg)
from .geometry import point_in_polygon
self._point_in_polygon = point_in_polygon
self._array = array
self._bands = bands
meta = {"driver": driver, "nodata": nodataval}
# create metadata dictionary
if array.dtype in Raster.FLOAT32:
dtype = "float32"
elif array.dtype in Raster.FLOAT64:
dtype = "float64"
elif array.dtype in Raster.INT8:
dtype = "int8"
elif array.dtype in Raster.INT16:
dtype = "int16"
elif array.dtype in Raster.INT32:
dtype = "int32"
elif array.dtype in Raster.INT64:
dtype = "int64"
else:
raise TypeError("dtype cannot be determined from Raster")
meta["dtype"] = dtype
if isinstance(crs, CRS):
pass
elif isinstance(crs, int):
crs = CRS.from_epsg(crs)
elif isinstance(crs, str):
crs = CRS.from_string(crs)
else:
TypeError("crs type not understood, provide an epsg or proj4")
meta["crs"] = crs
count, height, width = array.shape
meta["count"] = count
meta["height"] = height
meta["width"] = width
if not isinstance(transform, affine.Affine):
raise TypeError("Transform must be defined by an Affine object")
meta["transform"] = transform
self._meta = meta
self._dataset = None
self.__arr_dict = {
self._bands[b]: arr for b, arr in enumerate(self._array)
}
self.__xcenters = None
self.__ycenters = None
if isinstance(rio_ds, rasterio.io.DatasetReader):
self._dataset = rio_ds
@property
def bounds(self):
"""
Returns a tuple of xmin, xmax, ymin, ymax boundaries
"""
height = self._meta["height"]
width = self._meta["width"]
transform = self._meta["transform"]
xmin = transform[2]
ymax = transform[5]
xmax, ymin = transform * (width, height)
return xmin, xmax, ymin, ymax
@property
def bands(self):
"""
Returns a tuple of raster bands
"""
if self._dataset is None:
return tuple(self._bands)
else:
return self._dataset.indexes
@property
def nodatavals(self):
"""
Returns a Tuple of values used to define no data
"""
if self._dataset is None:
if isinstance(self._meta["nodata"], list):
nodata = tuple(self._meta["nodata"])
elif isinstance(self._meta["nodata"], tuple):
nodata = self._meta["nodata"]
else:
nodata = (self._meta["nodata"],)
return nodata
else:
return self._dataset.nodatavals
@property
def xcenters(self):
"""
Returns a np.ndarray of raster x cell centers
"""
if self.__xcenters is None:
self.__xycenters()
return self.__xcenters
@property
def ycenters(self):
"""
Returns a np.ndarray of raster y cell centers
"""
if self.__ycenters is None:
self.__xycenters()
return self.__ycenters
def __xycenters(self):
"""
Method to create np.arrays of the xy-cell centers
in the raster object
"""
arr = None
for _, arr in self.__arr_dict.items():
break
if arr is None:
raise AssertionError("No array data was found")
ylen, xlen = arr.shape
# assume that transform is an unrotated plane
# if transform indicates a rotated plane additional
# processing will need to be added in this portion of the code
xd = abs(self._meta["transform"][0])
yd = abs(self._meta["transform"][4])
x0, x1, y0, y1 = self.bounds
# adjust bounds to centroids
x0 += xd / 2.0
x1 -= xd / 2.0
y0 += yd / 2.0
y1 -= yd / 2.0
x = np.linspace(x0, x1, xlen)
y = np.linspace(y1, y0, ylen)
self.__xcenters, self.__ycenters =
|
np.meshgrid(x, y)
|
numpy.meshgrid
|
import json
import os
from typing import Tuple, Optional, List
import numpy as np
from rlgym.utils import RewardFunction
from rlgym.utils.gamestates import PlayerData, GameState
from rlgym.utils.reward_functions import CombinedReward
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.logger import Logger
class SB3LogReward(RewardFunction):
"""
Simple reward function for logging individual rewards to a custom Logger.
"""
def __init__(self, logger: Logger, reward_function: RewardFunction):
super().__init__()
self.logger = logger
self.reward_function = reward_function
self.reward_sum = 0
self.episode_steps = 0
self.global_steps = 0
def reset(self, initial_state: GameState):
if self.episode_steps > 0:
rew_fn_type = type(self.reward_function)
mean_reward = self.reward_sum / self.episode_steps
if rew_fn_type.__str__ is not object.__str__:
self.logger.record(f"{self.reward_function}/ep_rew_mean", mean_reward)
else:
self.logger.record(f"{rew_fn_type.__name__}/ep_rew_mean", mean_reward)
self.logger.dump(self.global_steps)
self.reward_sum = 0
self.episode_steps = 0
self.global_steps += 1
self.reward_function.reset(initial_state)
def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
rew = self.reward_function.get_reward(player, state, previous_action)
self.reward_sum += rew
self.episode_steps += 1
return rew
def get_final_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
rew = self.reward_function.get_final_reward(player, state, previous_action)
self.reward_sum += rew
self.episode_steps += 1
return rew
class SB3CombinedLogReward(CombinedReward):
def __init__(
self,
reward_functions: Tuple[RewardFunction, ...],
reward_weights: Optional[Tuple[float, ...]] = None,
file_location: str = 'combinedlogfiles'
):
"""
Creates the combined reward using multiple rewards, and a potential set
of weights for each reward. Will also log the weighted rewards to
the model's logger if a SB3CombinedLogRewardCallback is provided to the
learner.
:param reward_functions: Each individual reward function.
:param reward_weights: The weights for each reward.
:param file_location: The path to the directory that will be used to
transfer reward info
"""
super().__init__(reward_functions, reward_weights)
# Make sure there is a folder to dump to
os.makedirs(file_location, exist_ok=True)
self.file_location = f'{file_location}/rewards.txt'
self.lockfile = f'{file_location}/reward_lock'
# Initiates the array that will store the episode totals
self.returns = np.zeros(len(self.reward_functions))
# Obtain the lock
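# Note (added): open(..., 'x') raises FileExistsError when the file already
# exists, so creating the lockfile acts as an atomic test-and-set that
# serialises access to rewards.txt across worker processes.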
while True:
try:
open(self.lockfile, 'x')
break
except FileExistsError:
pass
except PermissionError:
pass
except Exception as e:
print(f'Error obtaining lock in SB3CombinedLogReward.__init__:\n{e}')
# Empty the file by opening in w mode
with open(self.file_location, 'w') as f:
pass
# Release the lock
try:
os.remove(self.lockfile)
except FileNotFoundError:
print('No lock to release! ')
def reset(self, initial_state: GameState):
self.returns = np.zeros(len(self.reward_functions))
super().reset(initial_state)
def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
rewards = [
func.get_reward(player, state, previous_action)
for func in self.reward_functions
]
self.returns += [a * b for a, b in zip(rewards, self.reward_weights)] # store the rewards
return float(np.dot(self.reward_weights, rewards))
def get_final_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
rewards = [
func.get_final_reward(player, state, previous_action)
for func in self.reward_functions
]
# Add the rewards to the cumulative totals with numpy broadcasting
self.returns += [a * b for a, b in zip(rewards, self.reward_weights)]
# Obtain the lock
while True:
try:
open(self.lockfile, 'x')
break
except FileExistsError:
pass
except PermissionError:
pass
except Exception as e:
print(f'Error obtaining lock in SB3CombinedLogReward.get_final_reward:\n{e}')
# Write the rewards to file and reset
with open(self.file_location, 'a') as f:
f.write('\n' + json.dumps(self.returns.tolist()))
# reset the episode totals
self.returns = np.zeros(len(self.reward_functions))
# Release the lock
try:
os.remove(self.lockfile)
except FileNotFoundError:
print('No lock to release! ')
return float(
|
np.dot(self.reward_weights, rewards)
|
numpy.dot
|
import torch
from torch.autograd import Variable
from torch.autograd import Function
from torchvision import models
from torchvision import utils
import cv2
import sys
import numpy as np
import argparse
from network5 import MGN
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--use-cuda', action='store_true', default=False,help='Use NVIDIA GPU acceleration')
parser.add_argument('--image-path', type=str, default='/home/wangminjie/Desktop/wmj/projects/Part-reID_2/CAM/1.png',help='Input image path')
args = parser.parse_args()
args.use_cuda = args.use_cuda and torch.cuda.is_available()
if args.use_cuda:
print("Using GPU for acceleration")
else:
print("Using CPU for computation")
return args
def preprocess_image(img):
means=[0.485, 0.456, 0.406]
stds=[0.229, 0.224, 0.225]
preprocessed_img = img.copy()[: , :, ::-1]
for i in range(3):
preprocessed_img[:, :, i] = preprocessed_img[:, :, i] - means[i]
preprocessed_img[:, :, i] = preprocessed_img[:, :, i] / stds[i]
preprocessed_img = \
np.ascontiguousarray(
|
np.transpose(preprocessed_img, (2, 0, 1))
|
numpy.transpose
|
import math
import multiprocessing
import itertools
import glob
import sys
import time
import re
import numpy as np
from matplotlib import pyplot as plt
from astropy.io import fits as pyfits
from scipy.optimize import fmin_powell
from scipy.interpolate import RectBivariateSpline
from . import kepio, kepmsg, kepkey, kepplot, kepfit, kepfunc
from .utils import PyKEArgumentHelpFormatter
__all__ = ['kepprfphot']
def kepprfphot(infile, prfdir, columns, rows, fluxes, border=0,
background=False, focus=False, ranges='0,0', xtol=1e-4,
ftol=1e-2, qualflags=False, outfile=None, plot=False, overwrite=False,
verbose=False, logfile='kepprfphot.log'):
"""
kepprfphot -- Fit a PSF model to time series observations within a Target
Pixel File
Parameters
----------
infile : str
The name of a MAST standard format FITS file containing Kepler Target
Pixel data within the first data extension.
columns : str or list
A starting guess for the CCD column position(s) of the source(s) that
are to be fit. The model is unlikely to converge if the guess is too
far away from the correct location. A rule of thumb is to provide a
guess within 1 CCD pixel of the true position. If more than one source
is being modeled then the column positions of each are separated by a
comma. The same number of sources in the columns, rows and fluxes field
is a requirement of this task.
rows : str or list
A starting guess for the CCD row position(s) of the source(s) that are
to be fit. The model is unlikely to converge if the guess is too far
away from the correct location. A rule of thumb is to provide a guess
within 1 CCD pixel of the true position. If more than one source is
being modeled then the row positions of each are separated by a comma.
The same number of sources in the columns, rows and fluxes field is a
requirement of this task.
fluxes : str or list
A starting guess for the flux(es) of the source(s) that are to be fit.
Fit convergence is not particularly reliant on the accuracy of these
guesses, but the fit will converge faster the more accurate the guess.
If more than one source is being modeled then the row positions of
each are separated by a comma. The same number of sources in the
columns, rows and fluxes field is a requirement of this task.
prfdir : str
The full or relative directory path to a folder containing the Kepler
PSF calibration. Calibration files can be downloaded from the Kepler
focal plane characteristics page at the MAST here:
http://archive.stsci.edu/missions/kepler/fpc/prf/.
border : int
If a background is included in the fit then it is modeled as a
two-dimensional polynomial. This parameter is the polynomial order.
A zero-order polynomial is generally recommended.
background : bool
Whether to include a background component in the model. If ``True``
the background will be represented by a two-dimensional polynomial of
order border. This functionality is somewhat experimental, with one eye
upon potential background gradients across large masks or on those
detectors more prone to pattern noise. Generally it is recommended to
set background as ``False``.
focus : bool
Whether to include pixel scale and focus rotation with the fit
parameters of the model. This is also an experimental function. This
approach does not attempt to deal with inter- or intra-pixel
variations. The recommended use is currently to set focus as ``False``.
ranges : str
The user can choose specific time ranges of data on which to work. This
could, for example, avoid removing known stellar flares from a dataset.
Time ranges are supplied as comma-separated pairs of Barycentric Julian
Dates (BJDs). Multiple ranges are separated by a semi-colon.
An example containing two time ranges is::
'2455012.48517,2455014.50072;2455022.63487,2455025.08231'
If the user wants to correct the entire time series then providing
ranges = '0,0' will tell the task to operate on the whole time series.
xtol : float
The dimensionless, relative model parameter convergence criterion for
the fit algorithm.
ftol : float
The dimensionless, relative model residual convergence criterion for
the fit algorithm.
qualflags : bool
If qualflags is ``False``, archived observations flagged with any
quality issue will not be fit.
outfile : str
kepprfphot creates two types of output file containing fit results and
diagnostics. ``outfile.png`` contains a time series plot of fit
parameters, residuals and chi-squared. ``outfile.fits`` contains a
table of the same properties, consistent with Kepler archive light
curve files. The FITS column PSF_FLUX contains the flux time-series in
units of e-/s derived by integrating under the best-fit PRF model.
PSF_BKG provides the best-fit background (if calculated) averaged over
all mask pixels in units of e-/s/pixel. PSF_CENTR1 provides the
best-fit PSF centroid position in the CCD column direction, in CCD
pixel units. Similarly, PSF_CENTR2 provides the best-fit PSF centroid
position in the CCD row direction, in CCD pixel units. If calculated,
PSF_FOCUS1 and PSF_FOCUS2 provide scale factors in the column and row
dimensions by which the CCD pixel scale is adjusted to approximate
focus variation. PSF_ROTATION provides the angle by which the scaled
PSF model was rotated on the focal plane in order to yield a best fit.
The table column PSF_RESIDUAL provides the sum of all mask pixels
after the best-fit model has been subtracted from the data. PSF_CHI2
delivers the best-fit chi-squared statistic for each observation.
plot : bool
Plot fit results to the screen?
verbose : bool
Print informative messages and warnings to the shell and logfile?
logfile : str
Name of the logfile containing error and warning messages.
Examples
--------
.. code-block:: bash
$ kepprfphot kplr012557548-2012004120508_lpd-targ.fits.gz --columns 95
--rows 1020 --fluxes 1.0 --border 0 --prfdir ../kplr2011265_prf --xtol 1e-7 --ftol 1e-7
--plot --verbose
--------------------------------------------------------------
KEPPRFPHOT -- infile=kplr012557548-2012004120508_lpd-targ.fits.gz
columns=95 rows=1020 fluxes=1.0 border=0 background=False
focus=False prfdir=../kplr2011265_prf ranges=0,0 xtol=1e-07 ftol=1e-07
qualflags=False plot=True overwrite=True verbose=True logfile=kepprfphot.log
KEPPRFPHOT started at: Wed Jun 14 15:33:30 2017
KepID: 12557548
RA (J2000): 290.96622
Dec (J2000): 51.50472
KepMag: 15.692
SkyGroup: 4
Season: 1
Channel: 32
Module: 10
Output: 4
19% nrow = 740 t = 0.1 sec
.. image:: ../_static/images/api/kepprfphot.png
"""
if outfile is None:
outfile = infile.split('.')[0] + "-{}".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPPRFPHOT -- '
+ ' infile={}'.format(infile)
+ ' outfile={}'.format(outfile)
+ ' columns={}'.format(columns)
+ ' rows={}'.format(rows)
+ ' fluxes={}'.format(fluxes)
+ ' border={}'.format(border)
+ ' background={}'.format(background)
+ ' focus={}'.format(focus)
+ ' prfdir={}'.format(prfdir)
+ ' ranges={}'.format(ranges)
+ ' xtol={}'.format(xtol)
+ ' ftol={}'.format(ftol)
+ ' qualflags={}'.format(qualflags)
+ ' plot={}'.format(plot)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
# start time
kepmsg.clock('KEPPRFPHOT started at', logfile, verbose)
f = fluxes
x = columns
y = rows
nsrc = len(f)
if len(x) != nsrc or len(y) != nsrc:
errmsg = ("ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and "
"fluxes must have the same number of sources")
kepmsg.err(logfile, errmsg, verbose)
guess = list(f) + list(x) + list(y)
if background:
if border == 0:
guess.append(0.0)
else:
for i in range((border + 1) * 2):
guess.append(0.0)
if focus:
guess = guess + [1.0, 1.0, 0.0]
# overwrite output file
for i in range(nsrc):
outfilename = '{0}_{1}.fits'.format(outfile, i)
if overwrite:
kepio.overwrite(outfilename, logfile, verbose)
if kepio.fileexists(outfilename):
errmsg = 'ERROR -- KEPPRFPHOT: {} exists. Use --overwrite'.format(outfilename)
kepmsg.err(logfile, errmsg, verbose)
# open TPF FITS file
try:
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, barytime = \
kepio.readTPF(infile, 'TIME', logfile, verbose)
except:
message = 'ERROR -- KEPPRFPHOT: is %s a Target Pixel File? ' % infile
kepmsg.err(logfile,message,verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, tcorr = \
kepio.readTPF(infile,'TIMECORR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, cadno = \
kepio.readTPF(infile,'CADENCENO',logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, fluxpixels = \
kepio.readTPF(infile,'FLUX', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, errpixels = \
kepio.readTPF(infile,'FLUX_ERR', logfile, verbose)
try:
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, poscorr1 = \
kepio.readTPF(infile, 'POS_CORR1', logfile, verbose)
except:
poscorr1 = np.zeros((len(barytime)), dtype='float32')
poscorr1[:] = np.nan
try:
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, poscorr2 = \
kepio.readTPF(infile, 'POS_CORR2', logfile, verbose)
except:
poscorr2 = np.zeros((len(barytime)), dtype='float32')
poscorr2[:] = np.nan
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, qual = \
kepio.readTPF(infile,'QUALITY',logfile,verbose)
struct = pyfits.open(infile)
tstart, tstop, bjdref, cadence = kepio.timekeys(struct, infile, logfile, verbose)
# input file keywords and mask map
cards0 = struct[0].header.cards
cards1 = struct[1].header.cards
cards2 = struct[2].header.cards
maskmap = np.copy(struct[2].data)
npix = np.size(np.nonzero(maskmap)[0])
# print target data
if verbose:
print('')
print(' KepID: {}'.format(kepid))
print(' RA (J2000): {}'.format(ra))
print('Dec (J2000): {}'.format(dec))
print(' KepMag: {}'.format(kepmag))
print(' SkyGroup: {}'.format(skygroup))
print(' Season: {}'.format(season))
print(' Channel: {}'.format(channel))
print(' Module: {}'.format(module))
print(' Output: {}'.format(output))
print('')
# read PRF file and interpolate
result = kepfunc.read_and_interpolate_prf(prfdir=prfdir, module=module,
output=output, column=column,
row=row, xdim=xdim, ydim=ydim,
verbose=verbose, logfile=logfile)
splineInterpolation = result[0]
DATx = result[1]
DATy = result[2]
PRFx = result[4]
PRFy = result[5]
# construct mesh for background model
bx = np.arange(1., float(xdim + 1))
by = np.arange(1., float(ydim + 1))
xx, yy = np.meshgrid(np.linspace(bx.min(), bx.max(), xdim),
np.linspace(by.min(), by.max(), ydim))
# Get time ranges for new photometry, flag good data
barytime += bjdref
tstart, tstop = kepio.timeranges(ranges, logfile, verbose)
incl = np.zeros((len(barytime)), dtype='int')
for rownum in range(len(barytime)):
for winnum in range(len(tstart)):
if (barytime[rownum] >= tstart[winnum]
and barytime[rownum] <= tstop[winnum]
and (qual[rownum] == 0 or qualflags)
and np.isfinite(barytime[rownum])
and np.isfinite(np.nansum(fluxpixels[rownum, :]))):
incl[rownum] = 1
if not np.in1d(1,incl):
message = ('ERROR -- KEPPRFPHOT: No legal data within the'
' range {}'.format(ranges))
kepmsg.err(logfile, message, verbose)
# filter out bad data
n = 0
nincl = (incl == 1).sum()
tim = np.zeros((nincl), 'float64')
tco = np.zeros((nincl), 'float32')
cad = np.zeros((nincl), 'float32')
flu = np.zeros((nincl, len(fluxpixels[0])), 'float32')
fer = np.zeros((nincl, len(fluxpixels[0])), 'float32')
pc1 = np.zeros((nincl), 'float32')
pc2 = np.zeros((nincl), 'float32')
qua = np.zeros((nincl), 'float32')
for rownum in range(len(barytime)):
if incl[rownum] == 1:
tim[n] = barytime[rownum]
tco[n] = tcorr[rownum]
cad[n] = cadno[rownum]
flu[n,:] = fluxpixels[rownum]
fer[n,:] = errpixels[rownum]
pc1[n] = poscorr1[rownum]
pc2[n] = poscorr2[rownum]
qua[n] = qual[rownum]
n += 1
barytime = tim * 1.0
tcorr = tco * 1.0
cadno = cad * 1.0
fluxpixels = flu * 1.0
errpixels = fer * 1.0
poscorr1 = pc1 * 1.0
poscorr2 = pc2 * 1.0
qual = qua * 1.0
# initialize plot arrays
t = np.array([], dtype='float64')
fl, dx, dy, bg, fx, fy, fa, rs, ch = [], [], [], [], [], [], [], [], []
for i in range(nsrc):
fl.append(np.array([], dtype='float32'))
dx.append(np.array([], dtype='float32'))
dy.append(np.array([], dtype='float32'))
# Preparing fit data message
progress = np.arange(nincl)
if verbose:
txt = 'Preparing...'
sys.stdout.write(txt)
sys.stdout.flush()
# single processor version
oldtime = 0.0
for rownum in range(np.min([80, len(barytime)])):
try:
if barytime[rownum] - oldtime > 0.5:
ftol = 1.0e-10; xtol = 1.0e-10
except:
pass
args = (fluxpixels[rownum, :], errpixels[rownum, :], DATx, DATy, nsrc,
border, xx, yy, PRFx, PRFy, splineInterpolation, guess, ftol,
xtol, focus, background, rownum, 80, float(x[i]),
float(y[i]), False)
guess = PRFfits(args)
ftol = ftol
xtol = xtol
oldtime = barytime[rownum]
# Fit the time series: multi-processing
anslist = []
cad1 = 0
cad2 = 50
for i in range(int(nincl/50) + 1):
try:
fluxp = fluxpixels[cad1:cad2, :]
errp = errpixels[cad1:cad2, :]
progress = np.arange(cad1, cad2)
except:
fluxp = fluxpixels[cad1:nincl, :]
errp = errpixels[cad1:nincl, :]
progress = np.arange(cad1, nincl)
try:
args = zip(fluxp, errp, itertools.repeat(DATx),
itertools.repeat(DATy),
itertools.repeat(nsrc),
itertools.repeat(border),
itertools.repeat(xx),
itertools.repeat(yy),
itertools.repeat(PRFx),
itertools.repeat(PRFy),
itertools.repeat(splineInterpolation),
itertools.repeat(guess),
itertools.repeat(ftol),
itertools.repeat(xtol),
itertools.repeat(focus),
itertools.repeat(background), progress,
itertools.repeat(np.arange(cad1,nincl)[-1]),
itertools.repeat(float(x[0])),
itertools.repeat(float(y[0])),
itertools.repeat(True))
p = multiprocessing.Pool()
model = [0.0]
model = p.imap(PRFfits, args, chunksize=1)
p.close()
p.join()
cad1 += 50; cad2 += 50
ans = np.array([np.array(item) for item in zip(*model)])
try:
anslist = np.concatenate((anslist, ans.transpose()), axis=0)
except:
anslist = ans.transpose()
guess = anslist[-1]
ans = anslist.transpose()
except:
pass
# single processor version
oldtime = 0.0; ans = []
for rownum in range(nincl):
proctime = time.time()
try:
if barytime[rownum] - oldtime > 0.5:
ftol = 1.0e-10; xtol = 1.0e-10
except:
pass
args = (fluxpixels[rownum, :], errpixels[rownum, :], DATx, DATy, nsrc,
border, xx, yy, PRFx, PRFy, splineInterpolation, guess, ftol,
xtol, focus, background, rownum, nincl, float(x[0]),
float(y[0]), True)
guess = PRFfits(args)
ans.append(guess)
ftol = ftol; xtol = xtol; oldtime = barytime[rownum]
ans = np.array(ans).transpose()
# unpack the best fit parameters
flux, OBJx, OBJy = [], [], []
na = np.shape(ans)[1]
for i in range(nsrc):
flux.append(ans[i, :])
OBJx.append(ans[nsrc + i, :])
OBJy.append(ans[nsrc * 2 + i, :])
try:
bterms = border + 1
if bterms == 1:
b = ans[nsrc * 3, :]
else:
b = np.array([])
bkg = []
for i in range(na):
bcoeff = np.array([ans[nsrc * 3:nsrc * 3 + bterms, i],
ans[nsrc * 3 + bterms:nsrc * 3 + bterms * 2, i]])
bkg.append(kepfunc.polyval2d(xx, yy, bcoeff))
b = np.append(b, np.nanmean(bkg[-1].reshape(bkg[-1].size)))
except:
b = np.zeros(na)
if focus:
wx = ans[-3, :]
wy = ans[-2, :]
angle = ans[-1, :]
else:
wx = np.ones(na)
wy = np.ones(na)
angle = np.zeros(na)
# construct model PRF in detector coordinates
residual, chi2 = [], []
for i in range(na):
f = np.empty(nsrc)
x = np.empty(nsrc)
y = np.empty(nsrc)
for j in range(nsrc):
f[j] = flux[j][i]
x[j] = OBJx[j][i]
y[j] = OBJy[j][i]
PRFfit = kepfunc.PRF2DET(f, x, y, DATx, DATy, wx[i], wy[i], angle[i],
splineInterpolation)
if background and bterms == 1:
PRFfit = PRFfit + b[i]
if background and bterms > 1:
PRFfit = PRFfit + bkg[i]
# calculate residual of DATA - FIT
xdim = np.shape(xx)[1]
ydim = np.shape(yy)[0]
DATimg = np.empty((ydim, xdim))
n = 0
for k in range(ydim):
for j in range(xdim):
DATimg[k,j] = fluxpixels[i, n]
n += 1
PRFres = DATimg - PRFfit
residual.append(np.nansum(PRFres) / npix)
# calculate a chi-square-like statistic: (data - model)^2 / model, summed over pixels
chi2.append(abs(np.nansum(np.square(DATimg - PRFfit) / PRFfit)))
# load the output arrays
otime = barytime - bjdref
otimecorr = tcorr
ocadenceno = cadno
opos_corr1 = poscorr1
opos_corr2 = poscorr2
oquality = qual
opsf_bkg = b
opsf_focus1 = wx
opsf_focus2 = wy
opsf_rotation = angle
opsf_residual = residual
opsf_chi2 = chi2
opsf_flux_err = np.empty((na))
opsf_flux_err.fill(np.nan)
opsf_centr1_err = np.empty((na))
opsf_centr1_err.fill(np.nan)
opsf_centr2_err = np.empty((na))
opsf_centr2_err.fill(np.nan)
opsf_bkg_err = np.empty((na))
opsf_bkg_err.fill(np.nan)
opsf_flux, opsf_centr1, opsf_centr2 = [], [], []
for i in range(nsrc):
opsf_flux.append(flux[i])
opsf_centr1.append(OBJx[i])
opsf_centr2.append(OBJy[i])
# load the plot arrays
t = barytime
for i in range(nsrc):
fl[i] = flux[i]
dx[i] = OBJx[i]
dy[i] = OBJy[i]
bg = b
fx = wx
fy = wy
fa = angle
rs = residual
ch = chi2
# construct output primary extension
for j in range(nsrc):
hdu0 = pyfits.PrimaryHDU()
for i in range(len(cards0)):
if cards0[i].keyword not in hdu0.header.keys():
hdu0.header[cards0[i].keyword] = (cards0[i].value,
cards0[i].comment)
else:
hdu0.header.cards[cards0[i].keyword].comment = cards0[i].comment
kepkey.history(call, hdu0, outfilename, logfile, verbose)
outstr = pyfits.HDUList(hdu0)
# construct output light curve extension
col1 = pyfits.Column(name='TIME', format='D', unit='BJD - 2454833',
array=otime)
col2 = pyfits.Column(name='TIMECORR', format='E', unit='d',
array=otimecorr)
col3 = pyfits.Column(name='CADENCENO', format='J', array=ocadenceno)
col4 = pyfits.Column(name='PSF_FLUX', format='E', unit='e-/s',
array=opsf_flux[j])
col5 = pyfits.Column(name='PSF_FLUX_ERR', format='E', unit='e-/s',
array=opsf_flux_err)
col6 = pyfits.Column(name='PSF_BKG', format='E', unit='e-/s/pix',
array=opsf_bkg)
col7 = pyfits.Column(name='PSF_BKG_ERR', format='E', unit='e-/s',
array=opsf_bkg_err)
col8 = pyfits.Column(name='PSF_CENTR1', format='E', unit='pixel',
array=opsf_centr1[j])
col9 = pyfits.Column(name='PSF_CENTR1_ERR', format='E', unit='pixel',
array=opsf_centr1_err)
col10 = pyfits.Column(name='PSF_CENTR2', format='E', unit='pixel',
array=opsf_centr2[j])
col11 = pyfits.Column(name='PSF_CENTR2_ERR', format='E', unit='pixel',
array=opsf_centr2_err)
col12 = pyfits.Column(name='PSF_FOCUS1', format='E', array=opsf_focus1)
col13 = pyfits.Column(name='PSF_FOCUS2', format='E', array=opsf_focus2)
col14 = pyfits.Column(name='PSF_ROTATION', format='E', unit='deg',
array=opsf_rotation)
col15 = pyfits.Column(name='PSF_RESIDUAL', format='E', unit='e-/s',
array=opsf_residual)
col16 = pyfits.Column(name='PSF_CHI2', format='E', array=opsf_chi2)
col17 = pyfits.Column(name='POS_CORR1', format='E', unit='pixel',
array=opos_corr1)
col18 = pyfits.Column(name='POS_CORR2', format='E', unit='pixel',
array=opos_corr2)
col19 = pyfits.Column(name='SAP_QUALITY', format='J', array=oquality)
cols = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8,
col9, col10, col11, col12, col13, col14, col15,
col16, col17, col18, col19])
hdu1 = pyfits.BinTableHDU.from_columns(cols)
for i in range(len(cards1)):
if (cards1[i].keyword not in hdu1.header.keys()
and cards1[i].keyword[:4] not in ['TTYP', 'TFOR', 'TUNI',
'TDIS', 'TDIM', 'WCAX',
'1CTY', '2CTY', '1CRP',
'2CRP', '1CRV', '2CRV',
'1CUN', '2CUN', '1CDE',
'2CDE', '1CTY', '2CTY',
'1CDL', '2CDL', '11PC',
'12PC', '21PC', '22PC']):
hdu1.header[cards1[i].keyword] = (cards1[i].value,
cards1[i].comment)
outstr.append(hdu1)
# construct output mask bitmap extension
hdu2 = pyfits.ImageHDU(maskmap)
for i in range(len(cards2)):
if cards2[i].keyword not in hdu2.header.keys():
hdu2.header[cards2[i].keyword] = (cards2[i].value,
cards2[i].comment)
else:
hdu2.header.cards[cards2[i].keyword].comment = cards2[i].comment
outstr.append(hdu2)
# write output file
print("Writing output file {}...\n".format(outfile + '_' + str(j) + '.fits'))
outstr.writeto(outfile + '_' + str(j) + '.fits', checksum=True)
# close input structure
struct.close()
# clean up x-axis unit
barytime0 = float(int(t[0] / 100) * 100.0)
t -= barytime0
t =
|
np.insert(t,[0],[t[0]])
|
numpy.insert
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
""" Please see the def main() function for code description."""
""" libraries """
import numpy as np
import os
|
np.set_printoptions(linewidth=200)
|
numpy.set_printoptions
|
from typing import Dict, Iterable, Union
from collections import defaultdict
import contextlib
from copy import deepcopy
import io
import numpy as np
import pandas as pd
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
class KeypointHandler:
"""Keypoint evaluation utility.
For keypoint evaluation, there should only be one category id for both ground truth and detection annotations.
Predicted annotations are matched greedily (based on bounding box IOU) with ground truth annotations.
Distances are computed between matched prediction and ground truth keypoint annotations.
By default, ground truth annotations with `'ignore'` flag set to 1 or those missing ground truth
keypoints are not counted. As a result, you may see fewer ground truth annotations than those annotated.
"""
def __init__(self, gt_coco, dt_coco):
self.gt_coco = gt_coco
self.dt_coco = dt_coco
assert gt_coco.getCatIds() == dt_coco.getCatIds(), "gt cats: {}\ndt cats: {}".format(gt_coco.getCatIds(),
dt_coco.getCatIds())
assert len(gt_coco.getCatIds()) == 1, "Expected one category, got {}".format(len(gt_coco.getCatIds()))
self._prepare()
def _prepare(self):
with contextlib.redirect_stdout(io.StringIO()):
coco_eval = COCOeval(self.gt_coco, self.dt_coco, iouType="bbox")
coco_eval.params.areaRng = [[0, 10000000000.0]]
coco_eval.params.iouThrs = [0.]
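# Added note: a single IoU threshold of 0 together with one unrestricted area
# range effectively makes COCOeval pair each ground truth with its
# best-overlapping detection, which is all the matching needed before the
# keypoint distances are computed below.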
coco_eval.evaluate()
eval_imgs = {(x["image_id"], x["category_id"]): x for x in coco_eval.evalImgs if x is not None}
self._mapping = self._init_mapping(eval_imgs.values())
return coco_eval
def _init_mapping(self, eval_imgs):
"""
Creates a dictionary mapping matched ground-truth/detection ids to
per-keypoint distances, visibilities and confidences.
N: # ground truth instances
K: # keypoints
"""
gt_coco = self.gt_coco
dt_coco = self.dt_coco
mappings = defaultdict(list)
for img_data in eval_imgs:
valid_gt = ~(img_data["gtIgnore"].astype(np.bool))
gt_ids = np.array(img_data["gtIds"])[valid_gt] # skip over gt annotations that we should ignore
gt_matches = (img_data["gtMatches"][:, valid_gt]).flatten()
# Loop over pairs of ground truth annotation ids and matched detection annotation ids
for gt_id, dt_id in zip(list(gt_ids), list(gt_matches)):
gt_ann = gt_coco.loadAnns(int(gt_id))[0]['keypoints']
if dt_id:
dt_ann = dt_coco.loadAnns(int(dt_id))[0]['keypoints']
else:
# If no detection is made, then the distances are infinite.
dt_ann = np.inf * np.ones(len(gt_ann))
dt_ann[2::3] = 0
# Do not compute distances over gt annotations that do not have keypoints.
if not gt_ann:
continue
gt_x, gt_y, gt_v = gt_ann[::3], gt_ann[1::3], gt_ann[2::3]
dt_x, dt_y, dt_c = dt_ann[::3], dt_ann[1::3], dt_ann[2::3]
# Distance, visibility, and confidence for each keypoint.
distance = np.sqrt(
(np.asarray(gt_x) - np.asarray(dt_x)) ** 2 + (np.asarray(gt_y) - np.asarray(dt_y)) ** 2)
visibility = np.asarray(gt_v)
confidence = np.asarray(dt_c)
info = {
"gt_id": gt_id, # shape (N,)
"dt_id": dt_id, # shape (N, )
"distance": distance, # shape (N, K)
"visibility": visibility, # shape (N, K)
"confidence": confidence, # shape (N, K)
}
for k, v in info.items():
mappings[k].append(v)
for k, v in mappings.items():
mappings[k] = np.asarray(v)
return mappings
def num_instances(self):
return len(self.mappings["gt_id"])
@property
def mapping(self):
return deepcopy(self._mapping)
def __str__(self):
s = "Num instances: {}\n".format(len(self._mapping["gt_id"]))
s += "Num detections: {}\n".format(sum(self._mapping["dt_id"] != 0))
s += "Num keypoints: {}\n".format(self._mapping["visibility"].shape[1])
return s
def _format_reference(self, reference, distance, num_keypoints, mappings, keypoint_names):
"""Computes and formats ground truth references into array of shape (N, K).
N: Number of instances
K: Number of keypoints
Returns:
np.ndarray: Shape `(N, K)`.
"""
if isinstance(reference, (int, float)):
gt_reference = reference * np.ones(len(self.gt_coco.loadAnns(mappings["gt_id"])))
else:
if reference == "torso_diameter" and "torso_diameter" not in self.gt_coco.loadAnns(mappings["gt_id"])[0]:
kp_names = {v: k for k, v in keypoint_names.items()}
lh_idx = kp_names["left_hip"] - 1
rs_idx = kp_names["right_shoulder"] - 1
gt_reference = []
for ann in self.gt_coco.loadAnns(mappings["gt_id"]):
if "keypoints" not in ann or not ann["keypoints"]:
gt_reference.append(float("nan"))
continue
kps = ann["keypoints"]
x, y, v = kps[::3], kps[1::3], kps[2::3]
if v[lh_idx] == 0 or v[rs_idx] == 0:
gt_reference.append(float("nan"))
continue
dist = np.sqrt((x[lh_idx] - x[rs_idx])**2 + (y[lh_idx] - y[rs_idx])**2)
gt_reference.append(dist)
else:
try:
gt_reference = [ann[reference] for ann in self.gt_coco.loadAnns(mappings["gt_id"])]
except KeyError as _:
raise KeyError("reference {} not found in annotation file".format(reference))
if reference == "bbox":
gt_reference = [max(x[2], x[3]) for x in gt_reference]
assert all([isinstance(x, (int, float)) for x in gt_reference])
gt_reference = np.stack([np.asarray(gt_reference)] * num_keypoints, axis=-1) # shape (N, K)
return gt_reference
def compute_pck(self,
alphas: Iterable[float] =
|
np.linspace(0, 1, 11)
|
numpy.linspace
|
"""
Author: <NAME> (<EMAIL>, http://personales.upv.es/jon)
Version: 2.0
Date: October 2016
Universitat Politecnica de Valencia
Technical University of Valencia TU.VLC
Gaussian Mixture Models
"""
import os
import sys
import numpy
class GMM:
"""
n_components: one by default
covar_type: can be 'diagonal' or 'full' or 'tied' or 'tied_diagonal' or 'spherical'
dim: two by default
"""
covar_types = ['diagonal', 'full', 'tied', 'tied_diagonal', 'spherical']
covar_diagonal_types = ['diagonal', 'tied_diagonal', 'spherical']
covar_tied_types = ['tied', 'tied_diagonal']
def __init__(self, n_components = 1, dim = 2, covar_type = 'diagonal', min_var = 1.0e-5, _for_accumulating = False):
if covar_type not in GMM.covar_types:
raise Exception('GMM(): incorrect covar type: %s' % covar_type)
self.min_var = min_var
self.covar_type = covar_type
self.dim = dim
#
self.log_2_pi = dim * numpy.log(2 * numpy.pi)
#
self.n_components = n_components
#
self.prioris = numpy.ones(n_components) / n_components
self.mu = []
self.sigma = []
self.L = []
self.sigma_diag_inv = []
for c in range(self.n_components):
self.mu.append(numpy.zeros(dim))
self.sigma.append(numpy.zeros([dim, dim]))
self.sigma_diag_inv.append(numpy.ones(dim))
self.L.append(numpy.zeros([dim, dim]))
#
self.log_prioris = numpy.log(self.prioris)
self.log_determinants = numpy.ones(n_components)
#
self.acc_posteriors = numpy.zeros(n_components)
self.acc_sample_counter = numpy.zeros(n_components)
self.log_likelihood = 0.0
#
if not _for_accumulating:
identity = numpy.identity(self.dim)
for c in range(self.n_components):
self.sigma[c][:, :] = identity[:, :]
#
self.compute_derived_parameters()
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def initialize_from(self, samples):
if type(samples) == list:
sample = samples[0]
else:
sample = samples
if len(sample.shape) != 2 or sample.shape[1] != self.dim:
raise Exception('GMM.initialize_from(): received an incorrect sample for this GMM')
#
self.prioris = numpy.ones(self.n_components) / self.n_components
#
for c in range(self.n_components):
#self.mu[c][:] = samples[ numpy.random.randint(len(samples)) , :]
if type(samples) == list:
self.mu[c] = samples[numpy.random.randint(len(samples))][0].copy()
else:
self.mu[c] = samples[numpy.random.randint(len(samples))].copy()
self.sigma[c] = numpy.identity(self.dim)
#
self.compute_derived_parameters()
# ------------------------------------------------------------------------------
def initialize_from_centroids(self, centroids):
if type(centroids) == list or (type(centroids) == numpy.ndarray and len(centroids.shape) == 2):
if len(centroids) != self.n_components:
raise Exception('GMM.initialize_from_centroids() needs as many centroids as GMM components')
for c in range(self.n_components):
self.mu[c] = centroids[c].copy()
elif type(centroids) == numpy.ndarray and len(centroids.shape) == 1:
if self.n_components != 1:
raise Exception('GMM.initialize_from_centroids() needs as many centroids as GMM components')
self.mu[0] = centroids.copy()
else:
raise Exception('GMM.initialize_from_centroids() received an unexpected configuration')
self.prioris = numpy.ones(self.n_components) / self.n_components
self.compute_derived_parameters()
# ------------------------------------------------------------------------------
def update_parameters(self, other):
#
if not isinstance(other, GMM):
raise Exception('GMM.update_parameters(): received an improper object instead of another GMM')
if self.n_components != other.n_components:
raise Exception('GMM.update_parameters(): received an GMM object incompatible with the current one')
#
self.prioris = other.acc_posteriors / other.acc_sample_counter.sum()
#if other.acc_sample_counter.min() == 0:
# raise Exception('GMM.update_parameters(): gaussian %d with zero samples' % other.acc_sample_counter.argmin())
if self.prioris.min() < 1.0e-200:
other.save_to_text('wrong-gmm')
raise Exception('GMM.update_parameters(): gaussian %d with zero probability' % self.prioris.argmin())
#while self.prioris.min() < 1.0e-200:
# other.remove_gaussian(self.prioris.argmin())
# self.prioris = other.acc_posteriors / other.acc_sample_counter.sum()
if abs(self.prioris.sum() - 1.0) > 1.0e-5:
other.save_to_text('wrong-gmm')
raise Exception('GMM.update_parameters(): sum of prioris is not equal to one: %e ' % self.prioris.sum())
self.log_prioris = numpy.log(self.prioris)
for c in range(self.n_components):
#
self.mu[c] = other.mu[c] / other.acc_posteriors[c]
self.sigma[c] = other.sigma[c] / other.acc_posteriors[c]
if self.covar_type in GMM.covar_diagonal_types:
self.sigma[c] = self.sigma[c] - numpy.diag(self.mu[c] * self.mu[c])
else:
self.sigma[c] = self.sigma[c] - numpy.outer(self.mu[c], self.mu[c])
#
for i in range(self.dim):
self.sigma[c][i, i] = max(self.sigma[c][i, i], self.min_var)
#
# This is needed in the splitting process
self.acc_sample_counter[c] = other.acc_sample_counter[c]
#
self.compute_derived_parameters()
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def compute_derived_parameters(self):
#
self.log_prioris = numpy.log(self.prioris)
#
if self.covar_type == 'spherical' :
identity = numpy.identity(self.dim)
for c in range(self.n_components):
self.sigma[c][:, :] = identity[:, :]
elif self.covar_type in GMM.covar_tied_types:
#_sigma = numpy.average(self.sigma, axis = 0)
_sigma = sum(self.sigma) / self.n_components
for c in range(self.n_components):
self.sigma[c][:, :] = _sigma
for c in range(self.n_components):
#
if self.covar_type == 'spherical' :
self.L[c] = numpy.identity(self.dim)
self.log_determinants[c] = 2 * numpy.log(numpy.diag(self.L[c])).sum() # det(sigma) = det(L)*det(L)
elif self.covar_type in GMM.covar_diagonal_types:
self.L[c] = numpy.diag(numpy.sqrt(numpy.diag(self.sigma[c])))
self.log_determinants[c] = numpy.log(numpy.diag(self.sigma[c])).sum()
else:
try:
self.L[c] = numpy.linalg.cholesky(self.sigma[c])
except Exception as e:
self.save_to_text('wrong-gmm')
print(c)
print(numpy.diag( self.sigma[c]))
print(self.sigma[c])
#sys.exit(100)
raise e
self.log_determinants[c] = 2 * numpy.log(numpy.diag(self.L[c])).sum() # det(sigma) = det(L)*det(L)
# We compute this in any case, but it is used only when working with diagonal covariance matrices.
self.sigma_diag_inv[c] = 1.0 / numpy.diag(self.sigma[c])
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def mahalanobis(self, sample):
#
if len(sample.shape) > 1 or sample.shape[0] != self.dim:
raise Exception('GMM.mahalanobis(): received an incorrect sample for this GMM')
#
_dists = numpy.zeros(self.n_components)
if self.covar_type in ['full', 'tied'] :
for c in range(self.n_components):
d = sample - self.mu[c]
v = numpy.linalg.solve(self.L[c], d)
_dists[c] = numpy.dot(v, v)
else:
for c in range(self.n_components):
d = sample - self.mu[c]
d = d * d
d *= self.sigma_diag_inv[c]
_dists[c] = d.sum()
return _dists
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def mahalanobis_batch(self, sample):
#
if len(sample.shape) != 2 or sample.shape[0] != self.dim:
raise Exception('GMM.mahalanobis_batch(): received an incorrect sample for this GMM ' + str(sample.shape) + ' ' + str(self.dim))
#
_dists = numpy.zeros([self.n_components, sample.shape[1]])
if self.covar_type in ['full', 'tied'] :
"""
In the case of full covariance matrix self.L[c] contains the Cholesky decomposition of self.sigma[c],
the covariance matrix of class 'c'.
Then for each sample we compute v in L*v = d by solving the triangular system, where d is the difference vector
of the sample with respect to the mean of class 'c'.
As v = L^-1 * d, the dot product v*v equals d.T * L.T^-1 * L^-1 * d = d.T * sigma^-1 * d, i.e. the squared Mahalanobis distance
"""
for c in range(self.n_components):
for i in range(sample.shape[1]):
d = sample[:, i] - self.mu[c]
v = numpy.linalg.solve(self.L[c], d)
_dists[c,i] = numpy.dot(v, v)
else:
"""
In the case of diagonal covariance matrices, computing the Mahalanobis distance is very simple.
We can directly divide the squared distances by sigma, the diagonal covariance matrix. But this
vector with the main diagonal of the covariance matrix must be expanded thanks to numpy.newaxis
to a number of columns matching the number of samples in the batch. Finally .sum(axis=0) computes
the Mahalanobis distances of the samples with respect to the mean of the class 'c'.
"""
for c in range(self.n_components):
d = sample - self.mu[c][:, numpy.newaxis]
d = d * d
d *= self.sigma_diag_inv[c][:, numpy.newaxis]
_dists[c] = d.sum(axis = 0)
"""
_dists is a (C x B) matrix, where C is the number of classes in the GMM and B the number of samples
in the batch. Each component is the Mahalanobis distance of the sample 'i' to the class 'c'.
"""
return _dists
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def log_densities(self, sample, with_a_priori_probs = True):
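# Added note: per component c this evaluates
#   log p(c) - 0.5 * (d2_c + log|Sigma_c| + dim * log(2*pi)),
# i.e. the log of p(c) * N(x | mu_c, Sigma_c), where d2_c is the squared
# Mahalanobis distance computed by self.mahalanobis(); the prior term is
# dropped when with_a_priori_probs is False.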
_dists = self.mahalanobis(sample)
if with_a_priori_probs:
return self.log_prioris - 0.5 * (_dists + self.log_determinants + self.log_2_pi)
else:
return - 0.5 * (_dists + self.log_determinants + self.log_2_pi)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def log_densities_batch(self, sample, with_a_priori_probs = True):
_dists = self.mahalanobis_batch(sample)
"""
_dists is a matrix of (C x B) where C is the number of classes in the GMM and B is the number of samples in the batch
Thanks to numpy in Python we can apply the formula to compute the log_densities (log of conditional probability densities)
in one line of code, because numpy broadcasts it properly over all the components. Sometimes we have to explicitly tell numpy
to expand some arrays to match the dimension of other arrays. That's why we use numpy.newaxis
so:
_log_densities is a matrix of (C x B) where C is the number of classes in the GMM and B is the number of samples in the batch
"""
if with_a_priori_probs:
_log_densities = self.log_prioris[:, numpy.newaxis] - 0.5 * (_dists + self.log_determinants[:,numpy.newaxis] + self.log_2_pi)
else:
_log_densities = - 0.5 * (_dists + self.log_determinants[:,numpy.newaxis] + self.log_2_pi)
#
return _log_densities
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def posteriors(self, sample):
_log_densities = self.log_densities(sample)
#print(" ".join(' {:10.4e}'.format(x) for x in _log_densities))
_max_log_density = _log_densities.max()
_densities = numpy.exp(_log_densities - _max_log_density)
_log_likelihood = numpy.log(_densities.sum()) + _max_log_density
#print(" ".join(' {:10.4e}'.format(x) for x in _densities))
return _densities / _densities.sum(), _log_likelihood
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def classify(self, sample):
_posteriors, _logL = self.posteriors(sample)
return _posteriors.argmax()
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def posteriors_batch(self, sample):
_log_densities = self.log_densities_batch(sample)
_max_log_density = _log_densities.max(axis = 0)
_densities =
|
numpy.exp(_log_densities - _max_log_density)
|
numpy.exp
|
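# The GMM code above computes the Mahalanobis distance d' Sigma^-1 d by solving
# L v = d with the Cholesky factor L of Sigma, so that the dot product v.v equals
# the distance. A minimal standalone check of that identity (illustrative sketch,
# not part of the class above):
import numpy

rng = numpy.random.default_rng(0)
A = rng.standard_normal((4, 4))
sigma = A @ A.T + 4.0 * numpy.eye(4)         # a symmetric positive-definite covariance
L = numpy.linalg.cholesky(sigma)             # sigma = L @ L.T
d = rng.standard_normal(4)                   # difference vector: sample - mean

v = numpy.linalg.solve(L, d)                 # the same solve used in mahalanobis_batch()
via_cholesky = numpy.dot(v, v)
direct = d @ numpy.linalg.solve(sigma, d)    # d' Sigma^-1 d computed directly
assert numpy.isclose(via_cholesky, direct)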
# -*- coding: utf-8 -*-
from __future__ import print_function,division,absolute_import
import logging
log = logging.getLogger(__name__)
import numpy as np
|
np.seterr(all='ignore')
|
numpy.seterr
|
"""
Library
"""
import os, sys
from pathlib import Path
current_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(current_dir, ".."))
CurDir = Path(current_dir)
import torch
import torch.nn as nn
import numpy as np
import time
import math
import matplotlib.pyplot as plt
from pathlib import Path
import pandas as pd
from src.algorithms.time2vec.time2vec import SineActivation
import torch
from tqdm import tqdm
torch.cuda.is_available()
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from datetime import datetime
today = datetime.strftime(datetime.today(), "%Y-%m-%d")
import yfinance as yf
# R"""equest historical data for past 5 years
class args:
lr = 1e-4
batch_size = 32
grad_norm = 0.7
device = "cuda:1"
time_dim = 16
num_layers = 1
dropout = 0.1
nhead = 8
n_epoch = 10000
n_log_interval = 50
save_folder_name = "m_time2vec_transformer"
stock_start_date = "2015-01-01"
scheduler_step_size = 5
scheduler_gamma = 0.9
train_length = 1300
WINDOW_SIZE = 32
df = yf.download("^GSPC", start=args.stock_start_date, end=today)
df.columns = [i.replace(" ", "_") for i in list(df)]
target_col = "Adj_Close"
"""
Data Preprocessing
"""
from sklearn.preprocessing import MinMaxScaler
scaled_data = []
for col in list(df):
min_, max_ = df[col].min(), df[col].max()
min_value = 0.9 * min_
max_value = 1.1 * max_
scaled_data.append(np.array([min_value, max_value]).reshape(-1, 1))
else:
scaled_info = np.hstack(scaled_data)
col_order = list(df)
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(scaled_info)
df[col_order] = scaler.transform(df[col_order].values)
df = df.reset_index(drop=False)
df["date"] = pd.to_datetime(df["Date"])
df["year"] = df["date"].dt.year
df["month"] = df["date"].dt.month
df["day"] = df["date"].dt.day
df["dayofweek"] = df["date"].dt.dayofweek
df["dayofmonth"] = df["date"].dt.days_in_month
df["dayofyear"] = df["date"].dt.dayofyear
df["weekday"] = df["date"].dt.weekday
df["weekofyear"] = df["date"].dt.weekofyear
df.drop(columns=["year", "date", "Date"], inplace=True)
all_data = pd.get_dummies(
df, columns=["month", "day", "dayofweek", "dayofmonth", "dayofyear", "weekday", "weekofyear"]
)
def create_inout_sequences(input_data, target_data, tw, output_window):
input_seq = []
output_seq = []
L = len(input_data)
for i in range(L - tw):
train_seq = input_data[i : i + tw]
train_label = target_data[i + output_window : i + tw + output_window]
input_seq.append(train_seq)
output_seq.append(train_label)
return
|
np.array(input_seq)
|
numpy.array
|
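# A small standalone sketch of the sliding-window construction performed by
# create_inout_sequences above (hypothetical toy series; the function in the
# snippet is shown truncated, so the loop is mirrored here directly):
import numpy as np

series = np.arange(10, dtype=float)
tw, output_window = 4, 1
input_seq, output_seq = [], []
for i in range(len(series) - tw):
    input_seq.append(series[i:i + tw])
    output_seq.append(series[i + output_window:i + tw + output_window])
input_seq, output_seq = np.array(input_seq), np.array(output_seq)
# input_seq[0]  -> [0. 1. 2. 3.]
# output_seq[0] -> [1. 2. 3. 4.]   (the same window shifted one step forward)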
import math
from typing import List
import numba
import numpy as np
import pyarrow as pa
class ByteVector:
"""
Builder that constructs a buffer based on byte-sized chunks.
As the memory is owned by this object but we cannot override __del__,
you need to explicitly call delete() to free the native memory.
"""
def __init__(self, initial_size: int):
self.buf = [] # type: List[numba.byte]
def delete(self):
pass
def append(self, byte):
"""Append a single byte to the stream."""
self.buf.append(byte)
def append_uint32(self, i32):
"""Append an unsigned 32bit integer."""
self.buf.append(np.uint8(np.uint32(i32) & np.uint32(0xFF)))
self.buf.append(np.uint8((np.uint32(i32) & np.uint32(0xFF00)) >> np.uint32(8)))
self.buf.append(
np.uint8((np.uint32(i32) & np.uint32(0xFF0000)) >> np.uint32(16))
)
self.buf.append(
np.uint8((np.uint32(i32) & np.uint32(0xFF000000)) >> np.uint32(24))
)
def append_int16(self, i16):
"""Append a signed 16bit integer."""
self.buf.append(np.uint8(i16 & np.uint16(0xFF)))
self.buf.append(np.uint8((i16 & np.uint16(0xFF00)) >> np.uint16(8)))
def append_int32(self, i32):
"""Append a signed 32bit integer."""
self.append_uint32(i32)
def append_int64(self, i64):
"""Append a signed 64bit integer."""
self.buf.append(np.uint8(np.uint64(i64) & np.uint64(0xFF)))
self.buf.append(np.uint8((np.uint64(i64) & np.uint64(0xFF00)) >> np.uint64(8)))
self.buf.append(
np.uint8((np.uint64(i64) & np.uint64(0xFF0000)) >> np.uint64(16))
)
self.buf.append(
np.uint8((np.uint64(i64) & np.uint64(0xFF000000)) >> np.uint64(24))
)
self.buf.append(
np.uint8((np.uint64(i64) & np.uint64(0xFF00000000)) >> np.uint64(32))
)
self.buf.append(
np.uint8((np.uint64(i64) & np.uint64(0xFF0000000000)) >> np.uint64(40))
)
self.buf.append(
np.uint8((
|
np.uint64(i64)
|
numpy.uint64
|
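# A quick check of the byte order produced by ByteVector.append_uint32 above:
# the value is emitted least-significant byte first (little-endian), so
# 0x12345678 becomes the byte sequence 0x78, 0x56, 0x34, 0x12.
assert list((0x12345678).to_bytes(4, "little")) == [0x78, 0x56, 0x34, 0x12]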
#!/usr/bin/env python3
import numpy
import scipy.stats
import scipy.special
import multiprocessing
def symmetricSTDP(t_post, t_pre, release):
STDPmax=1.0
STDPtau=70.0 #ms
return STDPmax*release*numpy.sum(numpy.exp(-0.5*((t_post-t_pre)/STDPtau)**2.0))
def gauss(x, sigma):
return numpy.exp(-0.5*(x/sigma)**2)/numpy.sqrt(2.0*numpy.pi)/sigma
def vonmises(x,beta):
return numpy.exp(beta*numpy.cos(x))*0.5/numpy.pi/scipy.special.i0(beta)
def gen_spiketrain(PFmax, PFwidth, phase_width, theta_phase_init, neuron_num, simlen, PFcenter):
spike=[]
for i in range(neuron_num):
spike.append([])
for t in range(simlen):
pos=float(t)/float(simlen)
theta_phase=2.0*numpy.pi*pos*8.0+theta_phase_init
precess=numpy.pi*(PFcenter-pos)
rate=PFmax*gauss(pos-PFcenter,PFwidth)*vonmises(theta_phase-precess,phase_width)
for i in range(neuron_num):
if numpy.random.rand()<rate[i]:
spike[i].append(t)
return spike
def simulate_plasticity(spike_time, neuron_num, bias_len):
U=0.37
tauSTD=150.0 #ms
tauSTF=40.0 #ms
#simulate short-term plasticity
release=[]
for n in range(neuron_num):
release.append([])
STD=1.0
STF=U+0.0
for i in range(len(spike_time[n])):
#recovery
if i>0:
STD=1.0-(1.0-STD)*numpy.exp(-(spike_time[n][i]-spike_time[n][i-1])/tauSTD)
STF=U-(U-STF)*numpy.exp(-(spike_time[n][i]-spike_time[n][i-1])/tauSTF)
#release
release[n].append(STD*STF)
STD=STD-STF*STD
STF=STF+U*(1.0-STF)
#simulate STDP
deltaw=numpy.zeros(neuron_num)
for j in range(len(spike_time[bias_len])): #pre
for n in range(neuron_num): #post
if n!=bias_len:
for i in range(len(spike_time[n])):
deltaw[n]+=symmetricSTDP(spike_time[n][i], spike_time[bias_len][j], release[bias_len][j])
return numpy.sum(deltaw[:bias_len])-numpy.sum(deltaw[bias_len+1:])
def stat_test(deltaw_bias):
wilcox=scipy.stats.wilcoxon(deltaw_bias, zero_method="wilcox")
binom=scipy.stats.binom_test(len(deltaw_bias[deltaw_bias>0.0]), len(deltaw_bias), p=0.5, alternative="two-sided")
return [numpy.mean(deltaw_bias), wilcox[0], wilcox[1], len(deltaw_bias[deltaw_bias>0.0])/len(deltaw_bias), binom]
def eval_bias(q, rand_seed):
numpy.random.seed(seed=rand_seed)
neuron_num=81
bias_len=40
simlen=1000 #ms
PFcenter=2.0*numpy.arange(neuron_num)/float(neuron_num-1)-0.5
Nsample=100
PFwidth=0.2
rate_sigma=50.0
rate_time=numpy.arange(simlen)
PFmax=0.14*numpy.random.rand()+0.01 #kHz
phase_width=9.9*numpy.random.rand()+0.1
deltaw_bias=
|
numpy.zeros(Nsample)
|
numpy.zeros
|
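# The vonmises() helper above is the standard von Mises density on [-pi, pi),
# normalized by the modified Bessel function I0(beta). A standalone numerical
# check of that normalization (sketch, independent of the simulation code):
import numpy
import scipy.special

def vonmises_check(x, beta):
    return numpy.exp(beta * numpy.cos(x)) * 0.5 / numpy.pi / scipy.special.i0(beta)

xs = numpy.linspace(-numpy.pi, numpy.pi, 10001)
assert abs(numpy.trapz(vonmises_check(xs, beta=2.0), xs) - 1.0) < 1e-6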
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import roc_curve, auc, precision_recall_curve
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
import matplotlib.pyplot as plt
def plot_roc_curve( y_predict_proba, y_truth):
y_score = np.array(y_predict_proba)
    if len(y_truth.shape) == 1:
        dummies = pd.get_dummies(y_truth)
        y_dummies = dummies.values
        y_classes = dummies.columns
    else:
        # y_truth is already one-hot encoded; fall back to positional class labels
        y_dummies = y_truth
        y_classes = list(range(y_dummies.shape[1]))
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
thresholds = dict()
roc_auc = dict()
for i, class_name in enumerate(y_classes):
fpr[i], tpr[i], thresholds[i] = roc_curve(y_dummies[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_dummies.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
plt.figure()
lw = 2
for i, class_name in enumerate(y_classes):
plt.plot(fpr[i], tpr[i],
lw=lw, label='%s (area = %0.2f)' % (class_name, roc_auc[i]))
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
# threshold for positive class
ax2 = plt.gca().twinx()
ax2.plot(fpr[1], thresholds[1], markeredgecolor='r', linestyle='dashed', color='r')
ax2.set_ylabel('Threshold')
ax2.set_ylim([thresholds[1][-1], thresholds[1][0]])
ax2.set_xlim([fpr[1][0], fpr[1][-1]])
# plt.show()
return plt.gcf()
def plot_precision_recall_curve(y_predict_proba, y_truth):
y_score = np.array(y_predict_proba)
    if len(y_truth.shape) == 1:
        dummies = pd.get_dummies(y_truth)
        y_dummies = dummies.values
        y_classes = dummies.columns
    else:
        # y_truth is already one-hot encoded; fall back to positional class labels
        y_dummies = y_truth
        y_classes = list(range(y_dummies.shape[1]))
for i, class_name in enumerate(y_classes):
precision, recall, thresholds = precision_recall_curve(y_dummies[:, i], y_score[:, i])
plt.step(recall, precision,
label=class_name,
lw=2,
where='post')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.legend(loc="lower left")
# ax2 = plt.gca().twinx()
# ax2.plot(recall[1:], thresholds, markeredgecolor='r',linestyle='dashed', color='r')
# ax2.set_ylabel('Threshold')
# plt.show()
return plt.gcf()
def plot_confidence_performance(y_predict, y_predict_proba, y_truth, num_bins=20):
predicted_probabilities = np.max(y_predict_proba, axis=1)
is_correct = (y_truth == y_predict)
ax = sns.regplot(x=predicted_probabilities, y=is_correct, x_bins=num_bins)
plt.xlabel('Model Confidence')
plt.ylabel('Average accuracy')
# plt.show()
return plt.gcf()
def plot_confusion_matrix(y_true, y_pred, classes=None,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
if classes is not None:
# Only use the labels that appear in the data
classes = classes[unique_labels(y_true, y_pred)]
else:
classes = unique_labels(y_true, y_pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return fig
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):
"""pretty print for confusion matrixes"""
columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length
empty_cell = " " * columnwidth
# Begin CHANGES
fst_empty_cell = (columnwidth - 3) // 2 * " " + "t/p" + (columnwidth - 3) // 2 * " "
if len(fst_empty_cell) < len(empty_cell):
fst_empty_cell = " " * (len(empty_cell) - len(fst_empty_cell)) + fst_empty_cell
# Print header
print(" " + fst_empty_cell, end=" ")
# End CHANGES
for label in labels:
print("%{0}s".format(columnwidth) % label, end=" ")
print()
# Print rows
for i, label1 in enumerate(labels):
print(" %{0}s".format(columnwidth) % label1, end=" ")
for j in range(len(labels)):
cell = "%{0}.1f".format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
print(cell, end=" ")
print()
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean =
|
np.mean(train_scores, axis=1)
|
numpy.mean
|
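# A hypothetical usage sketch for the plotting helpers defined above
# (plot_confusion_matrix / print_cm); the labels below are made up and the
# helper calls are shown commented out because the helpers live in that snippet:
import numpy as np
from sklearn.metrics import confusion_matrix

y_true = np.array([0, 0, 1, 1, 2, 2, 2, 1])
y_pred = np.array([0, 1, 1, 1, 2, 0, 2, 1])
cm = confusion_matrix(y_true, y_pred)   # same matrix the helpers visualize
# fig = plot_confusion_matrix(y_true, y_pred, normalize=True)
# print_cm(cm.astype(float), labels=["0", "1", "2"])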
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.autograd.functional import hessian
from torch.autograd.functional import jacobian as jac
import progressbar
tod = torch.distributions
from tensorflow.keras.utils import Progbar
import pkbar
from torch.distributions import Categorical
class Condpf(torch.nn.Module):
def __init__(self, model, param):
super().__init__()
self.model = model
self.mu = model.mu
self.sigma = model.sigma
self.llg = model.likelihood_logscale
self.l = param[0]
self.T = param[1]
self.N = param[2]
self.dx = param[3]
self.dy = param[4]
self.initial_val = param[5]
# output shape (2**l, N, dx), input shape (N, dx)
def unit_path_update(self, x):
hl = 2 ** (-self.l)
x_out = torch.zeros(int(2 ** (self.l) + 1), x.shape[0], x.shape[-1])
x_out[0] = x
for dt in range(2 ** self.l):
dw = torch.randn(x.shape[0], self.dx, 1) * np.sqrt(hl)
x_out[dt + 1] = x_out[dt] + self.mu(x_out[dt]) * hl + (self.sigma(x_out[dt]) @ dw)[..., 0]
return x_out[1:]
    # Identical paths update: both paths are driven by the same Brownian increments
def driving_update(self, x, x1):
hl = 2 ** (-self.l)
x_out = torch.zeros(int(2 ** (self.l) + 1), x.shape[0], x.shape[-1])
x1_out = torch.zeros(int(2 ** (self.l) + 1), x1.shape[0], x1.shape[-1])
x_out[0] = x
x1_out[0] = x1
for dt in range(2 ** self.l):
dw = torch.randn(x.shape[0], self.dx, 1) * np.sqrt(hl)
x_out[dt + 1] = x_out[dt] + self.mu(x_out[dt]) * hl + (self.sigma(x_out[dt]) @ dw)[..., 0]
x1_out[dt + 1] = x1_out[dt] + self.mu(x1_out[dt]) * hl + (self.sigma(x1_out[dt]) @ dw)[..., 0]
return x_out[1:], x1_out[1:]
# Coupled finer & coarser update in unit time, (2**l, N, dx) (2**(l-1), N, dx)
def coupled_update(self, x1, x2):
hl = 2 ** (-self.l)
hlm1 = 2 ** (-self.l + 1)
x1_out = torch.zeros(int(2 ** (self.l) + 1), x1.shape[0], x1.shape[-1])
x2_out = torch.zeros(int(2 ** (self.l - 1) + 1), x2.shape[0], x2.shape[-1])
x1_out[0] = x1
x2_out[0] = x2
for dt1 in range(2 ** (self.l - 1)):
dw1 = torch.randn(x1.shape[0], self.dx, 1) * np.sqrt(hl)
dw2 = torch.randn(x2.shape[0], self.dx, 1) * np.sqrt(hl)
dw = dw1 + dw2
x1_out[2 * dt1 + 1] = x1_out[2 * dt1] + self.mu(x1_out[2 * dt1]) * hl + (self.sigma(x1_out[2 * dt1]) @ dw1)[
..., 0]
x1_out[2 * dt1 + 2] = x1_out[2 * dt1 + 1] + self.mu(x1_out[2 * dt1 + 1]) * hl + \
(self.sigma(x1_out[2 * dt1 + 1]) @ dw2)[..., 0]
x2_out[dt1 + 1] = x2_out[dt1] + self.mu(x2_out[dt1]) * hlm1 + (self.sigma(x2_out[dt1]) @ dw)[..., 0]
return x1_out[1:], x2_out[1:]
# initial path generation, output shape (T*2**l+1, dx)
def initial_path_gen(self):
un = torch.zeros(self.getind(self.T) + 1, 1, self.dx) + self.initial_val
for t in range(self.T):
start_ind = self.getind(t)
update_ind = self.getind(t + 1)
un[start_ind + 1:update_ind + 1] = self.unit_path_update(un[start_ind])
return torch.squeeze(un)
# Resampling input multi-dimensional particle x
def resampling(self, weight, gn, x):
N = self.N
ess = 1 / ((weight ** 2).sum())
if ess <= (N / 2):
## Sample with uniform dice
dice = np.random.random_sample(N)
## np.cumsum obtains CDF out of PMF
bins = np.cumsum(weight)
bins[-1] = np.max([1, bins[-1]])
            ## np.digitize gets the indices of the bins the dice fall into
x_hat = x[:, np.digitize(dice, bins), :]
## after resampling we reset the accumulating weight
gn = torch.zeros(N)
if ess > (N / 2):
x_hat = x
return x_hat, gn
# Resampling input multi-dimensional particle x
def pure_resampling(self, weight, gn, x):
N = self.N
## Sample with uniform dice
dice = np.random.random_sample(N)
## np.cumsum obtains CDF out of PMF
bins = np.cumsum(weight)
bins[-1] = np.max([1, bins[-1]])
        ## np.digitize gets the indices of the bins the dice fall into
x_hat = x[:, np.digitize(dice, bins), :]
## after resampling we reset the accumulating weight
gn = torch.zeros(N)
return x_hat, gn
# Sampling out according to the weight
def sample_output(self, weight, x):
## Sample with uniform dice
dice = np.random.random_sample(1)
## np.cumsum obtains CDF out of PMF
bins = np.cumsum(weight)
bins[-1] = np.max([1, bins[-1]])
        ## np.digitize gets the index of the bin the dice falls into
x_hat = x[:, np.digitize(dice, bins), :]
## return the sampled particle path
return torch.squeeze(x_hat)
def getind(self, t):
return int(2 ** (self.l) * t)
def getcind(self, t):
return int(2 ** (self.l - 1) * t)
# input_path of shape (2**l*T+1, dx)
def condpf_kernel(self, input_path, observe_path):
un = torch.zeros(self.getind(self.T) + 1, self.N, self.dx) + self.initial_val
un_hat = torch.zeros(self.getind(self.T) + 1, self.N, self.dx) + self.initial_val
gn = torch.zeros(self.N)
for t in range(self.T):
start_ind = self.getind(t)
un[:start_ind + 1] = un_hat[:start_ind + 1]
# Euler update
update_ind = self.getind(t + 1)
un[start_ind + 1:update_ind + 1] = self.unit_path_update(un[start_ind])
# Main point for conditional PF is that the last particle is fixed, and it joins the resampling process
un[:, -1] = input_path
# Cumulating weight function
gn = self.llg(un[update_ind], observe_path[t + 1]) + gn
what = torch.exp(gn - torch.max(gn))
wn = what / torch.sum(what)
wn = wn.detach().numpy()
# Resampling
un_hat[:update_ind + 1], gn = self.resampling(wn, gn, un[:update_ind + 1])
un_hat[:, -1] = input_path
# Sample out a path and output it
return self.sample_output(wn, un)
# Markov chain generation with CondPF, initial chain generated with built-in function
def chain_gen_condpf(self, num_step, observe_path):
x_chain = torch.zeros(num_step + 1, self.getind(self.T) + 1, self.dx) + self.initial_val
x_chain[0] = self.initial_path_gen()
for step in range(num_step):
x_chain[step + 1] = self.condpf_kernel(x_chain[step], observe_path)
return x_chain
# Driving CCPF, both paths has same discretization levels and uses same BM in update
def drive_ccpf_kernel(self, input_path1, input_path2, observe_path):
un1 = torch.zeros(self.getind(self.T) + 1, self.N, self.dx) + self.initial_val
un1_hat = torch.zeros(self.getind(self.T) + 1, self.N, self.dx) + self.initial_val
un2 = torch.zeros(self.getind(self.T) + 1, self.N, self.dx) + self.initial_val
un2_hat = torch.zeros(self.getind(self.T) + 1, self.N, self.dx) + self.initial_val
gn1 = torch.zeros(self.N)
gn2 = torch.zeros(self.N)
for t in range(self.T):
start_ind1 = self.getind(t)
start_ind2 = self.getind(t)
un1[:start_ind1 + 1] = un1_hat[:start_ind1 + 1]
un2[:start_ind2 + 1] = un2_hat[:start_ind2 + 1]
# Euler update
update_ind1 = self.getind(t + 1)
update_ind2 = self.getind(t + 1)
un1[start_ind1 + 1:update_ind1 + 1], un2[start_ind2 + 1:update_ind2 + 1] = self.driving_update(
un1[start_ind1], un2[start_ind2])
# Main point for conditional PF is that the last particle is fixed, and it joins the resampling process
un1[:, -1] = input_path1
un2[:, -1] = input_path2
# Cumulating weight function
gn1 = self.llg(un1[update_ind1], observe_path[t + 1]) + gn1
what1 = torch.exp(gn1 - torch.max(gn1))
wn1 = what1 / torch.sum(what1)
wn1 = wn1.detach().numpy()
gn2 = self.llg(un2[update_ind2], observe_path[t + 1]) + gn2
what2 = torch.exp(gn2 - torch.max(gn2))
wn2 = what2 / torch.sum(what2)
wn2 = wn2.detach().numpy()
# Resampling
un1_hat[:update_ind1 + 1], gn1, un2_hat[:update_ind2 + 1], gn2 = self.coupled_maximal_resampling(wn1, wn2,
gn1, gn2,
un1[
:update_ind1 + 1],
un2[
:update_ind2 + 1])
un1_hat[:, -1] = input_path1
un2_hat[:, -1] = input_path2
# Sample out a path and output it
path1_output, path2_output = self.coupled_maximal_sample(wn1, wn2, un1, un2, 1)
return path1_output[:, 0, :], path2_output[:, 0, :]
def coupled_maximal_sample(self, weight1, weight2, x1, x2, N):
# Initialize
x1_hat = torch.zeros(x1.shape[0], N, self.dx)
x2_hat = torch.zeros(x2.shape[0], N, self.dx)
# Calculating many weights
unormal_min_weight = np.minimum(weight1, weight2)
min_weight_sum = np.sum(unormal_min_weight)
min_weight = unormal_min_weight / min_weight_sum
unormal_reduce_weight1 = weight1 - unormal_min_weight
unormal_reduce_weight2 = weight2 - unormal_min_weight
## Sample with uniform dice
dice = np.random.random_sample(N)
        ## [0] extracts the index array from the tuple returned by np.where
coupled = np.where(dice <= min_weight_sum)[0]
independ = np.where(dice > min_weight_sum)[0]
ncoupled = np.sum(dice <= min_weight_sum)
nindepend = np.sum(dice > min_weight_sum)
        if ncoupled > 0:
dice1 = np.random.random_sample(ncoupled)
bins = np.cumsum(min_weight)
bins[-1] = np.max([1, bins[-1]])
x1_hat[:, coupled, :] = x1[:, np.digitize(dice1, bins), :]
x2_hat[:, coupled, :] = x2[:, np.digitize(dice1, bins), :]
        ## nindepend>0 implies min_weight_sum<1, so np.sum(unormal_reduce_weight*) is positive and the divisions below cannot fail
if nindepend > 0:
reduce_weight1 = unormal_reduce_weight1 / np.sum(unormal_reduce_weight1)
reduce_weight2 = unormal_reduce_weight2 / np.sum(unormal_reduce_weight2)
dice2 = np.random.random_sample(nindepend)
bins1 = np.cumsum(reduce_weight1)
bins1[-1] = np.max([1, bins1[-1]])
bins2 = np.cumsum(reduce_weight2)
bins2[-1] = np.max([1, bins2[-1]])
x1_hat[:, independ, :] = x1[:, np.digitize(dice2, bins1), :]
x2_hat[:, independ, :] = x2[:, np.digitize(dice2, bins2), :]
return x1_hat, x2_hat
def coupled_maximal_resampling(self, weight1, weight2, gn1, gn2, x1, x2):
ess = 1 / ((weight1 ** 2).sum())
if ess <= (self.N / 2):
# When resampling happens, unormalized likelihood function reset
gn1 = torch.zeros(self.N)
gn2 = torch.zeros(self.N)
            # Maximal coupled sampling
x1_hat, x2_hat = self.coupled_maximal_sample(weight1, weight2, x1, x2, self.N)
if ess > (self.N / 2):
x1_hat, x2_hat = x1, x2
return x1_hat, gn1, x2_hat, gn2
def initial_lag_2path(self, observe_path):
hl = 2 ** (-self.l)
time_len = self.getind(self.T)
## Initial value
un1 = torch.zeros(time_len + 1, 1, self.dx) + self.initial_val
un2 = torch.zeros(time_len + 1, 1, self.dx) + self.initial_val
## Coupled Propagation
for t in range(self.T):
start_ind = self.getind(t)
# Euler update
update_ind = self.getind(t + 1)
un1[start_ind + 1:update_ind + 1] = self.unit_path_update(un1[start_ind])
un2[start_ind + 1:update_ind + 1] = self.unit_path_update(un2[start_ind])
## Lag-one forward for the first path
un1_lag_one_forward = self.condpf_kernel(un1[:, 0, :], observe_path)
return un1_lag_one_forward, un2[:, 0, :]
# generate a chain of coupled particles with Driving CCPF of length 'num_step+1', including the starting position
# both paths use same BM in update
def chain_gen_dccpf(self, num_step, observe_path):
x1_chain = torch.zeros(num_step + 1, self.getind(self.T) + 1, self.dx)
x2_chain = torch.zeros(num_step + 1, self.getind(self.T) + 1, self.dx)
x1_chain[0], x2_chain[0] = self.initial_lag_2path(observe_path)
for step in range(num_step):
x1_chain[step + 1], x2_chain[step + 1] = self.drive_ccpf_kernel(x1_chain[step], x2_chain[step],
observe_path)
return x1_chain, x2_chain
# function for test
def any_coupled_2path(self):
hl = 2 ** (-self.l)
time_len1 = self.getind(self.T)
time_len2 = self.getcind(self.T)
## Initial value
un1 = torch.randn(time_len1 + 1, self.dx) + self.initial_val
un2 = torch.randn(time_len2 + 1, self.dx) + self.initial_val
return un1, un2
# function for test
def chain_gen_ccpf(self, num_step, observe_path):
x1_chain = torch.zeros(num_step + 1, self.getind(self.T) + 1, self.dx)
x2_chain = torch.zeros(num_step + 1, self.getcind(self.T) + 1, self.dx)
x1_chain[0], x2_chain[0] = self.any_coupled_2path()
for step in range(num_step):
x1_chain[step + 1], x2_chain[step + 1] = self.ccpf_kernel(x1_chain[step], x2_chain[step], observe_path)
return x1_chain, x2_chain
    # Coupled Conditional Particle Filter Markov kernel; the two paths are coupled paths at levels l and l-1
def ccpf_kernel(self, input_path1, input_path2, observe_path):
un1 = torch.zeros(self.getind(self.T) + 1, self.N, self.dx) + self.initial_val
un1_hat = torch.zeros(self.getind(self.T) + 1, self.N, self.dx) + self.initial_val
un2 = torch.zeros(self.getcind(self.T) + 1, self.N, self.dx) + self.initial_val
un2_hat = torch.zeros(self.getcind(self.T) + 1, self.N, self.dx) + self.initial_val
gn1 = torch.zeros(self.N)
gn2 = torch.zeros(self.N)
for t in range(self.T):
start_ind1 = self.getind(t)
start_ind2 = self.getcind(t)
un1[:start_ind1 + 1] = un1_hat[:start_ind1 + 1]
un2[:start_ind2 + 1] = un2_hat[:start_ind2 + 1]
# Euler update
update_ind1 = self.getind(t + 1)
update_ind2 = self.getcind(t + 1)
un1[start_ind1 + 1:update_ind1 + 1], un2[start_ind2 + 1:update_ind2 + 1] = self.coupled_update(
un1[start_ind1], un2[start_ind2])
# Main point for conditional PF is that the last particle is fixed, and it joins the resampling process
un1[:, -1] = input_path1
un2[:, -1] = input_path2
# Cumulating weight function
gn1 = self.llg(un1[update_ind1], observe_path[t + 1]) + gn1
what1 = torch.exp(gn1 - torch.max(gn1))
wn1 = what1 / torch.sum(what1)
wn1 = wn1.detach().numpy()
gn2 = self.llg(un2[update_ind2], observe_path[t + 1]) + gn2
what2 = torch.exp(gn2 - torch.max(gn2))
wn2 = what2 / torch.sum(what2)
wn2 = wn2.detach().numpy()
# Resampling
un1_hat[:update_ind1 + 1], gn1, un2_hat[:update_ind2 + 1], gn2 = self.coupled_maximal_resampling(wn1, wn2,
gn1, gn2,
un1[
:update_ind1 + 1],
un2[
:update_ind2 + 1])
un1_hat[:, -1] = input_path1
un2_hat[:, -1] = input_path2
# Sample out a path and output it
path1_output, path2_output = self.coupled_maximal_sample(wn1, wn2, un1, un2, 1)
return path1_output[:, 0, :], path2_output[:, 0, :]
    # Generate two coupled particle path pairs; the first pair is moved one lag forward through the coupled conditional particle filter
def initial_lag_4path(self, observe_path):
hl = 2 ** (-self.l)
time_len = self.getind(self.T)
time_len1 = self.getcind(self.T)
## Initial value
un1 = torch.zeros(time_len + 1, 1, self.dx) + self.initial_val
un2 = torch.zeros(time_len + 1, 1, self.dx) + self.initial_val
cn1 = torch.zeros(time_len1 + 1, 1, self.dx) + self.initial_val
cn2 = torch.zeros(time_len1 + 1, 1, self.dx) + self.initial_val
## Independent Propagation of two coupled particle paths pairs
for t in range(self.T):
start_ind1 = self.getind(t)
start_ind2 = self.getcind(t)
update_ind1 = self.getind(t + 1)
update_ind2 = self.getcind(t + 1)
# Euler Update
un1[start_ind1 + 1:update_ind1 + 1], cn1[start_ind2 + 1:update_ind2 + 1] = self.coupled_update(
un1[start_ind1], cn1[start_ind2])
un2[start_ind1 + 1:update_ind1 + 1], cn2[start_ind2 + 1:update_ind2 + 1] = self.coupled_update(
un2[start_ind1], cn2[start_ind2])
## Lag-one forward for the first path, note that we input only one pair of coupled particle paths into ccpf kernel
un1_lag_forward, cn1_lag_forward = self.ccpf_kernel(un1[:, 0, :], cn1[:, 0, :], observe_path)
four_path = un1_lag_forward, cn1_lag_forward, un2[:, 0, :], cn2[:, 0, :]
return four_path
# Two Coupled Finer & Coraser update in unit time, output (2**l, N, dx) (2**(l-1), N, dx) (2**l, N, dx) (2**(l-1), N, dx)
# Input shape (N, dx) (N, dx) (N, dx) (N, dx)
def twocoupled_update(self, u1, c1, u2, c2):
hl = 2 ** (-self.l)
hlm1 = 2 ** (-self.l + 1)
# Initialize
u1_out = torch.zeros(int(2 ** (self.l) + 1), u1.shape[0], u1.shape[-1])
c1_out = torch.zeros(int(2 ** (self.l - 1) + 1), c1.shape[0], c1.shape[-1])
u2_out = torch.zeros(int(2 ** (self.l) + 1), u2.shape[0], u2.shape[-1])
c2_out = torch.zeros(int(2 ** (self.l - 1) + 1), c2.shape[0], c2.shape[-1])
# Initial values input
u1_out[0], c1_out[0], u2_out[0], c2_out[0] = u1, c1, u2, c2
# Coupled Euler Update
for dt1 in range(2 ** (self.l - 1)):
dw1 = torch.randn(u1.shape[0], self.dx, 1) * np.sqrt(hl)
dw2 = torch.randn(u1.shape[0], self.dx, 1) * np.sqrt(hl)
dw = dw1 + dw2
u1_out[2 * dt1 + 1] = u1_out[2 * dt1] + self.mu(u1_out[2 * dt1]) * hl + (self.sigma(u1_out[2 * dt1]) @ dw1)[
..., 0]
u1_out[2 * dt1 + 2] = u1_out[2 * dt1 + 1] + self.mu(u1_out[2 * dt1 + 1]) * hl + \
(self.sigma(u1_out[2 * dt1 + 1]) @ dw2)[..., 0]
c1_out[dt1 + 1] = c1_out[dt1] + self.mu(c1_out[dt1]) * hlm1 + (self.sigma(c1_out[dt1]) @ dw)[..., 0]
u2_out[2 * dt1 + 1] = u2_out[2 * dt1] + self.mu(u2_out[2 * dt1]) * hl + (self.sigma(u2_out[2 * dt1]) @ dw1)[
..., 0]
u2_out[2 * dt1 + 2] = u2_out[2 * dt1 + 1] + self.mu(u2_out[2 * dt1 + 1]) * hl + \
(self.sigma(u2_out[2 * dt1 + 1]) @ dw2)[..., 0]
c2_out[dt1 + 1] = c2_out[dt1] + self.mu(c2_out[dt1]) * hlm1 + (self.sigma(c2_out[dt1]) @ dw)[..., 0]
return u1_out[1:], c1_out[1:], u2_out[1:], c2_out[1:]
    # Get the coupled resampling indices (length 'N') through a rejection sampling technique
# Here 'N' does not need to be self.N
def maximal_rejection_sample_indice(self, weight1, weight2, N):
## Step 1
dice1 = np.random.random_sample(N)
bins1 = np.cumsum(weight1)
bins1[-1] = np.max([1, bins1[-1]])
indice1 = np.digitize(dice1, bins1)
u_sample = np.random.random_sample(N) * weight1[indice1]
## Initialization
indice2 = np.zeros(indice1.shape).astype(int)
## Step 1 Accepted: Identical indices
step1_accepted = np.where(u_sample <= weight2[indice1])[0]
indice2[step1_accepted] = indice1[step1_accepted]
## Step 1 Rejected
step1_rejected = np.where(u_sample > weight2[indice1])[0]
step1_num_rejected = step1_rejected.shape[0]
## Step 2
nrejected = step1_num_rejected
rejected = step1_rejected
        # step 2 terminates when every index is accepted
while (nrejected != 0):
dice2 = np.random.random_sample(nrejected)
bins2 = np.cumsum(weight2)
bins2[-1] = np.max([1, bins2[-1]])
indice2[rejected] = np.digitize(dice2, bins2)
## We only deal with indice2[rejected], which is the particles that got rejected in Step 1.
v_sample = np.random.random_sample(nrejected) * weight2[indice2[rejected]]
## Step 2 Accepted: Sample indice2 independently.
## step2_accepted is the index of indice2 that got accepted in Step 2.
step2_accepted = rejected[np.where(v_sample >= weight1[indice2[rejected]])[0]]
## Step 2 Rejected: Repeat Step 2 again
## rejected is the index of indice2 that got rejected in Step 2.
rejected = rejected[np.where(v_sample < weight1[indice2[rejected]])[0]]
nrejected = rejected.shape[0]
return indice1, indice2
# Coupled Maximal Resample 'N' coupled particle pairs, based on (x1,x2) associated with weights (weight1, weight2)
# Shape of x1: (-1, self.N, self.dx), shape of weight1: (self.N)
def coupled_maximal_rejection_sample(self, weight1, weight2, x1, x2, N):
## Initialization
x1_hat = torch.zeros(x1.shape)
x2_hat = torch.zeros(x2.shape)
## Sample Indices for both particles collection (N fine particles, N coarse particles)
indice1, indice2 = self.maximal_rejection_sample_indice(weight1, weight2, N)
## Get the resampled
x1_hat = x1[:, indice1, :]
x2_hat = x2[:, indice2, :]
return x1_hat, x2_hat
# Adaptive Maximally coupled resampling through rejection sampling method
def coupled_maximal_rejection_resampling(self, weight1, weight2, gn1, gn2, x1, x2):
ess = 1 / ((weight1 ** 2).sum())
if ess <= (self.N / 2):
# When resampling happens, unormalized likelihood function reset
gn1 = torch.zeros(self.N)
gn2 = torch.zeros(self.N)
            # Maximal coupled sampling
x1_hat, x2_hat = self.coupled_maximal_rejection_sample(weight1, weight2, x1, x2, self.N)
if ess > (self.N / 2):
x1_hat, x2_hat = x1, x2
return x1_hat, gn1, x2_hat, gn2
# indice1 has shape (N), here N does not have to be equal to self.N
def maximal_fix_index(self, weight1, weight2, indice1, N):
## Step 1: Truncated with known indice1
u_sample = np.random.random_sample(N) * weight1[indice1]
## Initialization
indice2 = np.zeros(indice1.shape).astype(int)
## Step 1 Accepted: Identical indices
step1_accepted = np.where(u_sample <= weight2[indice1])[0]
indice2[step1_accepted] = indice1[step1_accepted]
## Step 1 Rejected
step1_rejected = np.where(u_sample > weight2[indice1])[0]
step1_num_rejected = step1_rejected.shape[0]
## Step 2
nrejected = step1_num_rejected
rejected = step1_rejected
        # step 2 terminates when every index is accepted
while (nrejected != 0):
dice2 = np.random.random_sample(nrejected)
bins2 = np.cumsum(weight2)
bins2[-1] = np.max([1, bins2[-1]])
indice2[rejected] = np.digitize(dice2, bins2)
## We only deal with indice2[rejected], which is the particles that got rejected in Step 1.
v_sample = np.random.random_sample(nrejected) * weight2[indice2[rejected]]
## Step 2 Accepted: Sample indice2 independently.
## step2_accepted is the index of indice2 that got accepted in Step 2.
step2_accepted = rejected[np.where(v_sample >= weight1[indice2[rejected]])[0]]
## Step 2 Rejected: Repeat Step 2 again
## rejected is the index of indice2 that got rejected in Step 2.
rejected = rejected[np.where(v_sample < weight1[indice2[rejected]])[0]]
nrejected = rejected.shape[0]
return indice2
# Numpy version of coupled maximal resample
def coupled_maximal_sample_numpy(self, weight1, weight2, x1, x2, N):
# Initialize
x1_hat = np.zeros((x1.shape[0], N))
x2_hat = np.zeros((x2.shape[0], N))
# Calculating many weights
unormal_min_weight = np.minimum(weight1, weight2)
min_weight_sum = np.sum(unormal_min_weight)
min_weight = unormal_min_weight / min_weight_sum
unormal_reduce_weight1 = weight1 - unormal_min_weight
unormal_reduce_weight2 = weight2 - unormal_min_weight
## Sample with uniform dice
dice = np.random.random_sample(N)
        ## [0] extracts the index array from the tuple returned by np.where
coupled =
|
np.where(dice <= min_weight_sum)
|
numpy.where
|
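# coupled_maximal_sample above draws index pairs whose marginals follow weight1
# and weight2 while the two indices coincide with probability sum(min(w1, w2)),
# i.e. a maximal coupling of the two resampling distributions. A standalone
# sketch of the same scheme on a toy pair of weight vectors (hypothetical
# values, not tied to the particle filter above):
import numpy as np

rng = np.random.default_rng(1)
w1 = np.array([0.5, 0.3, 0.2])
w2 = np.array([0.2, 0.3, 0.5])
overlap = np.minimum(w1, w2)              # 'unormal_min_weight' in the class
p_same = overlap.sum()                    # probability the pair shares an index

def sample_pair():
    if rng.random() <= p_same:            # coupled branch: identical indices
        i = rng.choice(3, p=overlap / p_same)
        return i, i
    r1 = (w1 - overlap) / (1.0 - p_same)  # residual distributions, sampled
    r2 = (w2 - overlap) / (1.0 - p_same)  # independently of each other
    return rng.choice(3, p=r1), rng.choice(3, p=r2)

pairs = np.array([sample_pair() for _ in range(20000)])
# Pairs agree roughly p_same of the time and each marginal matches its weights.
assert abs((pairs[:, 0] == pairs[:, 1]).mean() - p_same) < 0.02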
"""You can define a path with a list of points combined with a cross-section.
A path can be extruded using any CrossSection, returning a Component.
The CrossSection defines the layer numbers, widths and offsets.
Based on phidl.path
"""
from collections.abc import Iterable
from typing import Optional
import numpy as np
import shapely.ops
from phidl import path
from phidl.device_layout import Path as PathPhidl
from phidl.path import smooth as smooth_phidl
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.cross_section import CrossSection, Section, Transition
from gdsfactory.port import Port
from gdsfactory.types import (
Coordinates,
CrossSectionSpec,
Float2,
LayerSpec,
PathFactory,
)
def _simplify(points, tolerance):
import shapely.geometry as sg
ls = sg.LineString(points)
ls_simple = ls.simplify(tolerance=tolerance)
return
|
np.asarray(ls_simple.coords)
|
numpy.asarray
|
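# _simplify above just wraps shapely's line simplification. A standalone sketch
# of the same call on hypothetical points (shapely only; the tolerance value is
# illustrative):
import numpy as np
import shapely.geometry as sg

points = np.array([[0.0, 0.0], [0.5, 1e-4], [1.0, 0.0], [2.0, 1.0]])
simplified = np.asarray(sg.LineString(points).simplify(tolerance=1e-3).coords)
# The nearly collinear middle point is dropped; the corner and endpoints remain.
assert simplified.shape[0] < points.shape[0]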
from .attitude import AttitudeTransform
from .quaternion import Quaternion
from .vector import Vector
import numpy as np
class IMU:
"""
w_meas = w_true + bias + white_noise
"""
def __init__(self,
w_true: Vector,
gyro_err: dict={
"bias_model": "no_bias",
"sampling_freq": 1.,
"bias_var": 0.,
"sensor_var": 0.,
}
) -> None:
self.w_true = w_true.val
self.gyro_err = gyro_err
self.bias = Vector([0.,0.,0.]).val
self.dt = 1. / self.gyro_err["sampling_freq"]
def apply_bias(self) -> Vector:
if self.gyro_err["bias_model"] == "random_walk":
bias_x = self.bias[0] + np.random.normal(loc=np.sqrt(self.gyro_err["bias_var"]))
bias_y = self.bias[1] + np.random.normal(loc=np.sqrt(self.gyro_err["bias_var"]))
bias_z = self.bias[2] + np.random.normal(loc=np.sqrt(self.gyro_err["bias_var"]))
self.bias = Vector([bias_x, bias_y, bias_z]).val
elif self.gyro_err["bias_model"] == "gauss_markov":
Tc = self.gyro_err["correlation_time"]
bias_x = np.exp(-self.dt/Tc)*self.bias[0] + np.random.normal(loc=np.sqrt(self.gyro_err["bias_var"]))
bias_y =
|
np.exp(-self.dt/Tc)
|
numpy.exp
|
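# The "gauss_markov" branch above implements a first-order Gauss-Markov bias,
# b[k+1] = exp(-dt/Tc) * b[k] + w[k]. A standalone one-axis sketch with
# hypothetical parameters (note: the driving noise here is drawn zero-mean with
# standard deviation sigma_b, which is an assumption about the intended model):
import numpy as np

dt, Tc, sigma_b = 0.01, 300.0, 1e-4     # sample period, correlation time, noise std
rng = np.random.default_rng(0)
bias = np.zeros(1000)
for k in range(bias.size - 1):
    bias[k + 1] = np.exp(-dt / Tc) * bias[k] + rng.normal(scale=sigma_b)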
import numpy as np
from ecos import solve
from scipy.sparse import coo_matrix , csc_matrix , issparse
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# idLogit: driver routine
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def idLogit( K , I , N , y , X , ind , constant=False , og=False , Lambdas=[1000.0,1000.0] , prints={} , **kwargs ) :
"""idLogit model estimation routine, MLE problem solved with the ECOS solver.
Calls specialized routines based on specific penalization.
Args:
K (int): the number of model features
I (int): the number of individuals for which there is data
N (int): the total number of observations
y (numpy.array): A length-N vector of choices, coded as +/- 1
X (numpy.array or scipy.sparse): A N x K
ind (list): A length-N list of individual indices (1,...,I) for each observation
constant (:obj:`bool`, optional): include a constant in the model (true), or don't (false)
Lambdas (:obj:`list`, optional): L1 and L2 penalty weights, both default to 1000
prints (:obj:`dict`, optional): List of extra setup prints to do
**kwargs: Keyword arguments passed directly to ecos.solve
Returns:
x (numpy.array): A length K (or K+1) array of estimated coefficients
"""
    if( Lambdas[0] <= 0.0 ) : # no L1 penalty
        if( Lambdas[1] <= 0.0 ) : # neither penalty
            return idLogit_np( K , I , N , y , X , ind , constant=constant , og=og , prints=prints , **kwargs )
        else : # L2 penalty only
            return idLogit_l2( K , I , N , y , X , ind , constant=constant , og=og , Lambda2=Lambdas[1] , prints=prints , **kwargs )
    else : # Lambdas[0] > 0
        if( Lambdas[1] <= 0.0 ) : # L1 penalty only
            return idLogit_l1( K , I , N , y , X , ind , constant=constant , og=og , Lambda1=Lambdas[0] , prints=prints , **kwargs )
else : # both penalties
return idLogit_en( K , I , N , y , X , ind , constant=constant , og=og , prints=prints , **kwargs )
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
#
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def idLogit_np( K , I , N , y , X , ind , constant=False , og=False , prints={} , **kwargs ) :
"""idLogit with no penalties.
Technically, a _very_ underdetermined problem, and the coefficients are trivial.
Args:
K (int): the number of model features
I (int): the number of individuals for which there is data
N (int): the total number of observations
y (numpy.array): A length-N vector of choices, coded as +/- 1
X (numpy.array or scipy.sparse): A N x K
ind (list): A length-N list of individual indices (1,...,I) for each observation
constant (:obj:`bool`, optional): include a constant in the model (true), or don't (false)
prints (:obj:`dict`, optional): List of extra setup prints to do
**kwargs: Keyword arguments passed directly to ecos.solve
Returns:
x (numpy.array): A length K (or K+1) array of estimated coefficients
"""
return
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
#
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def idLogit_l1( K , I , N , y , X , ind , constant=False , og=False , Lambda1=1000.0 , prints={} , **kwargs ) :
"""idLogit model estimation with L1 penalty, MLE solved with the ECOS solver.
Args:
K (int): the number of model features
I (int): the number of individuals for which there is data
N (int): the total number of observations
y (numpy.array): A length-N vector of choices, coded as +/- 1
X (numpy.array or scipy.sparse): A N x K
ind (list): A length-N list of individual indices (1,...,I) for each observation
constant (:obj:`bool`, optional): include a constant in the model (true), or don't (false)
Lambda1 (:obj:`float`, optional): L1 penalty weight, defaults to 1000
prints (:obj:`dict`, optional): List of extra setup prints to do
**kwargs: Keyword arguments passed directly to ecos.solve
Returns:
x (numpy.array): A length K (or K+1) array of estimated coefficients
"""
if( Lambda1 <= 0.0 ) :
return idLogit_np( K , I , N , y , X )
IK = I * K
Ninv = 1.0 / N
if issparse(X) :
X = X.tocoo()
Xnnz = X.nnz
else :
Xnnz = X.shape[0] * X.shape[1]
Nvars = K + 3 * IK + 4 * N # b , l , u , v , w , d , p , m
Ncons = N + K + IK # u + v + w ; d(1) + ... + d(I) ; d - p + m
Annz = 3 * N + 4 * IK # 3N ; IK ; 3IK
Ncone = N + 2 * IK + 6 * N # w, p, m in Pos ; Exp variables
Gnnz = 5 * N + 2 * IK + 2 * Xnnz # N + 2IK ; 4N + 2Xnnz
# with these sizes, we can estimate the memory requirements...
# convenience list that lets us easily index sparse matrix terms
indices = np.arange( 0 , max(IK,2*N) , dtype=np.int )
# convenience values of "variable" starts and lengths in a
# concatenated Nvars-vector of all variables.
starts , length = {} , {}
starts['b'] , length['b'] = 0 , K
starts['l'] , length['l'] = starts['b'] + length['b'] , N
starts['u'] , length['u'] = starts['l'] + length['l'] , N
starts['v'] , length['v'] = starts['u'] + length['u'] , N
starts['w'] , length['w'] = starts['v'] + length['v'] , N
starts['d'] , length['d'] = starts['w'] + length['w'] , IK
starts['p'] , length['p'] = starts['d'] + length['d'] , IK
starts['m'] , length['m'] = starts['p'] + length['p'] , IK
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# COST VECTOR (ie, objective)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
c = np.zeros( Nvars , dtype=np.float_ )
c[ starts['l'] : starts['l'] + length['l'] ] = Ninv
    c[ starts['p'] : starts['p'] + length['p'] ] = Lambda1 * Ninv
    c[ starts['m'] : starts['m'] + length['m'] ] = Lambda1 * Ninv
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# LINEAR EQUALITY CONSTRAINTS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# u + v + w = 1 N rows 3N nonzeros (each of N rows has 3 terms)
# d(1) + ... + d(I) = 0 K rows IK nonzeros (each of K rows has I terms)
# d - p + m = 0 IK rows 3IK nonzeros (each of IK rows has 3 terms)
#
Arows = np.zeros( Annz , dtype=np.int )
Acols = np.zeros( Annz , dtype=np.int )
Adata = np.ones( Annz , dtype=np.float_ ) # almost all of the constraint data terms are "1"
j , jj = 0 , 0
# u + v + w
jj = j + N
Arows[j:jj] = indices[0:N]
Acols[j:jj] = starts['u'] + indices[0:N]
j = jj
jj = j + N
Arows[j:jj] = indices[0:N]
Acols[j:jj] = starts['v'] + indices[0:N]
j = jj
jj = j + N
Arows[j:jj] = indices[0:N]
Acols[j:jj] = starts['w'] + indices[0:N]
j = jj
# d(1) + ... + d(I), stored in d "K-first"
for k in range(0,K) :
jj = j + I
Arows[j:jj] = N + k
Acols[j:jj] = starts['d'] + k + K * indices[0:I]
j = jj
# d - p + m, noting that we have to set data for "p" terms as well as rows/cols
jj = j + IK
Arows[j:jj] = N+K+indices[0:IK]
Acols[j:jj] = starts['d'] + indices[0:IK]
j = jj
jj = j + IK
Arows[j:jj] = N+K+indices[0:IK]
Acols[j:jj] = starts['p'] + indices[0:IK]
Adata[j:jj] = -1.0
j = jj
jj = j + IK
Arows[j:jj] = N+K+indices[0:IK]
Acols[j:jj] = starts['m'] + indices[0:IK]
j = jj
A = csc_matrix( (Adata,(Arows,Acols)) , shape=(Ncons,Nvars) , dtype=np.float_ );
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # LINEAR EQUALITY CONSTRAINT RHS (choose the fill value so that fewer entries have to be set explicitly)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if( N < IK + K ) :
b = np.zeros( Ncons , dtype=np.float_ )
b[0:N] = 1.0;
else :
b = np.ones( Ncons , dtype=np.float_ )
b[N:] = 0.0;
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# DIMENSIONS OF CONIC CONSTRAINTS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
dims = {
'l' : N + 2*IK , # w, p, and m must be non-negative
        'q' : [ ] ,       # no second-order cone in the L1-penalized problem (there is no 't' variable here)
'e' : 2*N # 2 triplets of Exp cone variables for each n (3N "variables")
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CONIC CONSTRAINT RHS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
h = np.zeros( Ncone , dtype=np.float_ )
    h[ dims['l'] + 3*indices[0:2*N] + 2 ] = 1.0
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CONIC CONSTRAINTS MATRIX
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The first N+2IK rows of G are easily described:
#
# b l u v w d p m t
#
# 0 0 0 0 -I 0 0 0 0 N rows, N non-zeros
# 0 0 0 0 0 0 -I 0 0 IK rows, IK non-zeros
# 0 0 0 0 0 0 0 -I 0 IK rows, IK non-zeros
#
# This suggests initializing Gdata entries to -1 and filling in Grows
# and Gcols accordingly.
#
# The remainder are not so easily described, but are very similar to
# cases reviewed above. Particularly, for n = 1,...,N
#
# G[ N+2IK + 3n - 1 , : ] = G[ 1+N+3IK + 6n - 1 , : ] = 0'
#
# are empty rows, and we don't have to do anything. For n = 0,...,N-1
#
# G[ N+2IK + 3n + 0 , starts['l'] + n ] = 1
# G[ N+2IK + 3n + 1 , starts['u'] + n ] = -1
# G[ N+2IK + 3n + 3 , starts['l'] + n ] = 1
# G[ N+2IK + 3n + 4 , starts['v'] + n ] = -1
#
# this is thus 4N non-zeros. Only b and d terms remain:
#
# G[ N+2IK + 3n + 0 , 0:K ] = y[n] X[:,n]'
# G[ N+2IK + 3n + 0 , start[n] : start[n] + K ] = y[n] X[:,n]'
#
# where
#
# start[n] = starts['d'] + K*(i(n)-1)
#
# There are thus 2 "Xnnz" non-zeros here, where Xnnz is the number
# of non-zeros in the X matrix. This is the only part that requires changing
# data.
Grows = np.zeros( Gnnz , dtype=np.int )
Gcols = np.zeros( Gnnz , dtype=np.int )
Gdata = - np.ones( Gnnz , dtype=np.float_ )
j , jj , base = 0 , 0 , 0
# w, p, m terms are non-negative
jj = j + N
Grows[j:jj] = base + indices[0: N]
Gcols[j:jj] = starts['w'] + indices[0:length['w']]
j = jj
base += N
jj = j + IK
Grows[j:jj] = base + indices[0:IK]
Gcols[j:jj] = starts['p'] + indices[0:length['p']]
j = jj
base += IK
jj = j + IK
Grows[j:jj] = base + indices[0:IK]
Gcols[j:jj] = starts['m'] + indices[0:length['m']]
j = jj
base += IK
# base is fixed now, because we intersperse the exponential cone terms
# u, v terms in Exp
jj = j + N
Grows[j:jj] = base + 6*indices[0:N] + 1
Gcols[j:jj] = starts['u'] + indices[0:length['u']]
j = jj
jj = j + N
Grows[j:jj] = base + 6*indices[0:N] + 4
Gcols[j:jj] = starts['v'] + indices[0:length['v']]
j = jj
# l terms in Exp
jj = j + N
Grows[j:jj] = base + 6*indices[0:N] + 0
Gcols[j:jj] = starts['l'] + indices[0:length['l']]
Gdata[j:jj] = 1.0
j = jj
jj = j + N
Grows[j:jj] = base + 6*indices[0:N] + 3
Gcols[j:jj] = starts['l'] + indices[0:length['l']]
Gdata[j:jj] = 1.0
j = jj
# b, d terms in Exp ** hardest part ** handle differently for sparse and dense X
if( issparse(X) ) :
jj = j + X.nnz
Grows[j:jj] = base + 6 * X.row
Gcols[j:jj] = starts['b'] + X.col
Gdata[j:jj] = X.data
j = jj
jj = j + X.nnz
Grows[j:jj] = base + 6 * X.row
Gcols[j:jj] = starts['d'] + length['b'] * (ind[X.row]-1) + X.col
Gdata[j:jj] = X.data
j = jj
else :
for n in range(0,N) :
data = - y[n] * X[n,:]
jj = j + length['b']
Grows[j:jj] = base + 6*n
Gcols[j:jj] = starts['b'] + indices[0:length['b']]
Gdata[j:jj] = data
j = jj
jj = j + length['b']
Grows[j:jj] = base + 6*n
Gcols[j:jj] = starts['d'] + length['b'] * (ind[n]-1) + indices[0:length['b']]
Gdata[j:jj] = data
j = jj
G = csc_matrix( (Gdata,(Grows,Gcols)) , shape=(Ncone,Nvars) , dtype=np.float_ )
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# OPTIONAL PRINTS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if 'start' in prints and prints['start'] :
print( "\nVariable Starts: \n" )
print( starts )
if 'costs' in prints and prints['costs'] :
print( "\nCosts: \n" )
for k in starts :
if( np.max( np.abs( c[starts[k]:starts[k]+length[k]] ) ) == 0 ) :
pass # print( "variable: %s is zero" % k )
else :
print( "variable: %s" % k )
print( c[starts[k]:starts[k]+length[k]] )
if 'lineq' in prints and prints['lineq'] :
print( "\nLinear Equality Constraints: \n" )
Array = A.toarray()
blocks = [ N , K , IK ]
base , baseB = 0 , 0
for B in blocks :
baseB = base + B
print( "\nA: %i to %i\n" %(base,baseB) )
rows = np.arange(base,baseB)
for k in starts :
if( np.max( np.max( np.abs( Array[rows,starts[k]:starts[k]+length[k]] ) ) ) == 0 ) :
pass # print( "variable: %s is zero" % k )
else :
print( "variable: %s" % k )
print( Array[rows,starts[k]:starts[k]+length[k]] )
base = baseB
del Array
if 'lerhs' in prints and prints['lerhs'] :
print( "\nLinear Equality Constraints RHS: \n" )
blocks = [ N , K , IK ]
base , baseB = 0 , 0
for B in blocks :
baseB = base + B
print( b[base:baseB] )
base = baseB
if 'cones' in prints and prints['cones'] :
print( "\nConic Constraints: \n" )
Grray = G.toarray()
blocks = [ N , IK , IK , 6*N ]
base , baseB = 0 , 0
for B in blocks :
baseB = base + B
print( "\nG: [%i,%i)\n" %(base,baseB) )
rows = np.arange(base,baseB)
for k in starts :
if( np.max( np.max( np.abs( Grray[rows,starts[k]:starts[k]+length[k]] ) ) ) == 0 ) :
pass # print( "variable: %s is zero" % k )
else :
print( "variable: %s" % k )
print( Grray[rows,starts[k]:starts[k]+length[k]] )
base = baseB
del Grray
if 'ccrhs' in prints and prints['ccrhs'] :
print( "\nConic Constraints RHS: \n" )
blocks = [ N , IK , IK , 6*N ]
base , baseB = 0 , 0
for B in blocks :
baseB = base + B
print( h[base:baseB] )
base = baseB
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CLEANUP
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
del indices
del Arows , Acols , Adata
del Grows , Gcols , Gdata
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# SOLVE ATTEMPT
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
res = solve( c , G , h , dims , A , b , **kwargs )
return res['x'][0:K] , res['info']
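# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# A hypothetical usage sketch for idLogit_l1 as defined above: random data with K features,
# I individuals and N observations, choices coded +/-1, individual indices in 1..I. It needs the
# ecos package installed, and on recent NumPy the deprecated np.int / np.float_ aliases used in the
# setup code may have to be replaced with int / np.float64. Guarded so it only runs as a script.
if __name__ == "__main__" :
    import numpy as np
    rng = np.random.default_rng(0)
    K , I , N = 3 , 5 , 200
    X = rng.standard_normal( (N,K) )
    ind = rng.integers( 1 , I+1 , size=N )                   # individual index for each observation
    beta_true = np.array( [ 1.0 , -2.0 , 0.5 ] )
    p = 1.0 / ( 1.0 + np.exp( - X @ beta_true ) )
    y = np.where( rng.random(N) < p , 1.0 , -1.0 )
    coef , info = idLogit_l1( K , I , N , y , X , ind , Lambda1=1000.0 )
    print( coef )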
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
#
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def idLogit_l2( K , I , N , y , X , ind , constant=False , og=False , Lambda2=1000.0 , prints={} , **kwargs ) :
"""idLogit model estimation with L2 penalty, MLE solved with the ECOS solver.
Args:
K (int): the number of model features
I (int): the number of individuals for which there is data
N (int): the total number of observations
y (numpy.array): A length-N vector of choices, coded as +/- 1
X (numpy.array or scipy.sparse): A N x K
ind (list): A length-N list of individual indices (1,...,I) for each observation
constant (:obj:`bool`, optional): include a constant in the model (true), or don't (false)
        Lambda2 (:obj:`float`, optional): L2 penalty weight, defaults to 1000
prints (:obj:`dict`, optional): List of extra setup prints to do
**kwargs: Keyword arguments passed directly to ecos.solve
Returns:
x (numpy.array): A length K (or K+1) array of estimated coefficients
"""
if( Lambda2 <= 0.0 ) :
return idLogit_np( K , I , N , y , X )
IK = I * K
Ninv = 1.0 / N
if issparse(X) :
X = X.tocoo()
Xnnz = X.nnz
else :
Xnnz = X.shape[0] * X.shape[1]
Nvars = K + IK + 4 * N + 1 # b , l , u , v , w , d , t
Ncons = N + K # u + v + w ; d(1) + ... + d(I)
Annz = 3 * N + IK # 3N ; IK
Ncone = N + 1 + IK + 6 * N # w in Pos ; (t,d) in SOC ; Exp vars
Gnnz = 1 + 5 * N + IK + 2 * Xnnz # N ; 1 + IK ; 4N + 2Xnnz
# with these sizes, we can estimate the memory requirements...
# convenience list that lets us easily index sparse matrix terms
indices = np.arange( 0 , max(IK,2*N) , dtype=np.int )
# convenience values of "variable" starts and lengths in a
# concatenated Nvars-vector of all variables.
starts , length = {} , {}
starts['b'] , length['b'] = 0 , K
starts['l'] , length['l'] = starts['b'] + length['b'] , N
starts['u'] , length['u'] = starts['l'] + length['l'] , N
starts['v'] , length['v'] = starts['u'] + length['u'] , N
starts['w'] , length['w'] = starts['v'] + length['v'] , N
starts['d'] , length['d'] = starts['w'] + length['w'] , IK
starts['t'] , length['t'] = starts['d'] + length['d'] , 1
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# COST VECTOR (ie, objective)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
c = np.zeros( Nvars , dtype=np.float_ )
c[ starts['l'] : starts['l'] + length['l'] ] = Ninv
    c[ starts['t'] ] = Lambda2 * Ninv / 2.0
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# LINEAR EQUALITY CONSTRAINTS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# u + v + w = 1 N rows 3N nonzeros (each of N rows has 3 terms)
# d(1) + ... + d(I) = 0 K rows IK nonzeros (each of K rows has I terms)
#
Arows = np.zeros( Annz , dtype=np.int )
Acols = np.zeros( Annz , dtype=np.int )
Adata = np.ones( Annz , dtype=np.float_ ) # almost all of the constraint data terms are "1"
j , jj = 0 , 0
# u + v + w
jj = j + N
Arows[j:jj] = indices[0:N]
Acols[j:jj] = starts['u'] + indices[0:N]
j = jj
jj = j + N
Arows[j:jj] = indices[0:N]
Acols[j:jj] = starts['v'] + indices[0:N]
j = jj
jj = j + N
Arows[j:jj] = indices[0:N]
Acols[j:jj] = starts['w'] + indices[0:N]
j = jj
# d(1) + ... + d(I), stored in d "K-first"
for k in range(0,K) :
jj = j + I
Arows[j:jj] = N + k
Acols[j:jj] = starts['d'] + k + K * indices[0:I]
j = jj
A = csc_matrix( (Adata,(Arows,Acols)) , shape=(Ncons,Nvars) , dtype=np.float_ );
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# LINEAR EQUALITY CONSTRAINT RHS (presume N > K)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
b = np.zeros( Ncons , dtype=np.float_ )
b[0:N] = 1.0;
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# DIMENSIONS OF CONIC CONSTRAINTS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
dims = {
'l' : N , # w must be non-negative
'q' : [ 1+IK ] , # (t,d) lie in the second-order cone
'e' : 2*N # 2 triplets of Exp cone variables for each n (3N "variables")
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CONIC CONSTRAINT RHS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
h = np.zeros( Ncone , dtype=np.float_ )
h[ dims['l'] + dims['q'][0] + 3*indices[0:2*N] + 2 ] = 1.0
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CONIC CONSTRAINTS MATRIX
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The first 1+N+IK rows of G are easily described:
#
    #          b   l   u   v   w   d   t
    #
    #          0   0   0   0  -I   0   0       N rows, N non-zeros
    #          0   0   0   0   0   0  -1       1 row, 1 non-zero
    #          0   0   0   0   0  -I   0       IK rows, IK non-zeros
#
# This suggests initializing Gdata entries to -1 and filling in Grows
# and Gcols accordingly.
#
    # The remainder is not so easily described, but is very similar to
    # cases reviewed above. Particularly, for n = 0,...,N-1
    #
    #      G[ 1+N+IK + 6n + 2 , : ] = G[ 1+N+IK + 6n + 5 , : ] = 0'
    #
    # are empty rows, and we don't have to do anything. Also for n = 0,...,N-1
    #
    #      G[ 1+N+IK + 6n + 0 , starts['l'] + n ] = 1
    #      G[ 1+N+IK + 6n + 1 , starts['u'] + n ] = -1
    #      G[ 1+N+IK + 6n + 3 , starts['l'] + n ] = 1
    #      G[ 1+N+IK + 6n + 4 , starts['v'] + n ] = -1
    #
    # this is thus 4N non-zeros. Only b and d terms remain:
    #
    #      G[ 1+N+IK + 6n + 0 , 0:K ] = - y[n] X[n,:]
    #      G[ 1+N+IK + 6n + 0 , start[n] : start[n] + K ] = - y[n] X[n,:]
#
# where
#
# start[n] = starts['d'] + K*(i(n)-1)
#
# There are thus 2 "Xnnz" non-zeros here, where Xnnz is the number
# of non-zeros in the X matrix. This is the only part that requires changing
# data.
Grows = np.zeros( Gnnz , dtype=np.int )
Gcols = np.zeros( Gnnz , dtype=np.int )
Gdata = - np.ones( Gnnz , dtype=np.float_ )
j , jj , base = 0 , 0 , 0
# w, terms are non-negative
jj = j + N
Grows[j:jj] = base + indices[0: N]
Gcols[j:jj] = starts['w'] + indices[0:length['w']]
j = jj
base += N
# t and d terms in the SOC
Grows[j] , Gcols[j] = base , starts['t'] ; j += 1
base += 1
jj = j + IK
Grows[j:jj] = base + indices[0:IK]
Gcols[j:jj] = starts['d'] + indices[0:length['d']]
j = jj
base += IK
# base is fixed now, because we intersperse the exponential cone terms
# u, v terms in Exp
jj = j + N
Grows[j:jj] = base + 6*indices[0:N] + 1
Gcols[j:jj] = starts['u'] + indices[0:length['u']]
j = jj
jj = j + N
Grows[j:jj] = base + 6*indices[0:N] + 4
Gcols[j:jj] = starts['v'] + indices[0:length['v']]
j = jj
# l terms in Exp
jj = j + N
Grows[j:jj] = base + 6*indices[0:N] + 0
Gcols[j:jj] = starts['l'] + indices[0:length['l']]
Gdata[j:jj] = 1.0
j = jj
jj = j + N
Grows[j:jj] = base + 6*indices[0:N] + 3
Gcols[j:jj] = starts['l'] + indices[0:length['l']]
Gdata[j:jj] = 1.0
j = jj
# b, d terms in Exp ** hardest part ** handle differently for sparse and dense X
if( issparse(X) ) :
jj = j + X.nnz
Grows[j:jj] = base + 6 * X.row
Gcols[j:jj] = starts['b'] + X.col
        Gdata[j:jj] = - y[X.row] * X.data    # same -y[n] X[n,:] data as the dense branch below
j = jj
jj = j + X.nnz
Grows[j:jj] = base + 6 * X.row
Gcols[j:jj] = starts['d'] + length['b'] * (ind[X.row]-1) + X.col
        Gdata[j:jj] = - y[X.row] * X.data    # same -y[n] X[n,:] data as the dense branch below
j = jj
else :
for n in range(0,N) :
data = - y[n] * X[n,:]
jj = j + length['b']
Grows[j:jj] = base + 6*n
Gcols[j:jj] = starts['b'] + indices[0:length['b']]
Gdata[j:jj] = data
j = jj
jj = j + length['b']
Grows[j:jj] = base + 6*n
Gcols[j:jj] = starts['d'] + length['b'] * (ind[n]-1) + indices[0:length['b']]
Gdata[j:jj] = data
j = jj
G = csc_matrix( (Gdata,(Grows,Gcols)) , shape=(Ncone,Nvars) , dtype=np.float_ )
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# OPTIONAL PRINTS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if 'start' in prints and prints['start'] :
print( "\nVariable Starts: \n" )
print( starts )
if 'costs' in prints and prints['costs'] :
print( "\nCosts: \n" )
for k in starts :
if( np.max( np.abs( c[starts[k]:starts[k]+length[k]] ) ) == 0 ) :
pass # print( "variable: %s is zero" % k )
else :
print( "variable: %s" % k )
print( c[starts[k]:starts[k]+length[k]] )
if 'lineq' in prints and prints['lineq'] :
print( "\nLinear Equality Constraints: \n" )
Array = A.toarray()
blocks = [ N , K ]
base , baseB = 0 , 0
for B in blocks :
baseB = base + B
print( "\nA: %i to %i\n" %(base,baseB) )
rows = np.arange(base,baseB)
for k in starts :
if( np.max( np.max( np.abs( Array[rows,starts[k]:starts[k]+length[k]] ) ) ) == 0 ) :
pass # print( "variable: %s is zero" % k )
else :
print( "variable: %s" % k )
print( Array[rows,starts[k]:starts[k]+length[k]] )
base = baseB
del Array
if 'lerhs' in prints and prints['lerhs'] :
print( "\nLinear Equality Constraints RHS: \n" )
blocks = [ N , K ]
base , baseB = 0 , 0
for B in blocks :
baseB = base + B
print( b[base:baseB] )
base = baseB
if 'cones' in prints and prints['cones'] :
print( "\nConic Constraints: \n" )
Grray = G.toarray()
blocks = [ N , 1+IK , 6*N ]
base , baseB = 0 , 0
for B in blocks :
baseB = base + B
print( "\nG: [%i,%i)\n" %(base,baseB) )
rows = np.arange(base,baseB)
for k in starts :
if( np.max( np.max( np.abs( Grray[rows,starts[k]:starts[k]+length[k]] ) ) ) == 0 ) :
pass # print( "variable: %s is zero" % k )
else :
print( "variable: %s" % k )
print( Grray[rows,starts[k]:starts[k]+length[k]] )
base = baseB
del Grray
if 'ccrhs' in prints and prints['ccrhs'] :
print( "\nConic Constraints RHS: \n" )
blocks = [ N , 1+IK , 6*N ]
base , baseB = 0 , 0
for B in blocks :
baseB = base + B
print( h[base:baseB] )
base = baseB
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CLEANUP
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
del indices
del Arows , Acols , Adata
del Grows , Gcols , Gdata
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# SOLVE ATTEMPT
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
res = solve( c , G , h , dims , A , b , **kwargs )
return res['x'][0:K] , res['info']
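# Illustrative usage sketch for the idLogit_l2 API documented above. This is a
# hypothetical example on synthetic data, not a result from any paper: the
# sizes, seed, and Lambda2 value are placeholders, and it assumes the
# module-level imports used by idLogit_l2 (numpy as np, ecos' solve, scipy's
# csc_matrix/issparse) are present. Defined as a function so nothing runs on import.
def _example_idLogit_l2() :
    K , I , Nobs = 3 , 10 , 200                       # features, individuals, observations
    rng = np.random.default_rng(0)
    X = rng.normal( size=(Nobs,K) )                   # dense Nobs x K feature matrix
    ind = rng.integers( 1 , I+1 , size=Nobs )         # individual index in 1,...,I per observation
    btrue = np.array([ 1.0 , -0.5 , 0.25 ])           # "true" coefficients for the synthetic data
    prob = 1.0 / ( 1.0 + np.exp( - X @ btrue ) )
    y = np.where( rng.random(Nobs) < prob , 1.0 , -1.0 )   # choices coded +/- 1
    beta , info = idLogit_l2( K , I , Nobs , y , X , ind , Lambda2=100.0 )
    return beta , info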
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
#
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def idLogit_en( K , I , N , y , X , ind , constant=False , og=False , Lambdas=[1000.0,1000.0] , prints={} , **kwargs ) :
"""idLogit model estimation with Elastic Net penalty, MLE solved with the ECOS solver.
Args:
K (int): the number of model features
I (int): the number of individuals for which there is data
N (int): the total number of observations
y (numpy.array): A length-N vector of choices, coded as +/- 1
        X (numpy.array or scipy.sparse): An N x K matrix of observed features
ind (list): A length-N list of individual indices (1,...,I) for each observation
constant (:obj:`bool`, optional): include a constant in the model (true), or don't (false)
        Lambdas (:obj:`list`, optional): [L1, L2] penalty weights, each defaulting to 1000.0
prints (:obj:`dict`, optional): List of extra setup prints to do
**kwargs: Keyword arguments passed directly to ecos.solve
Returns:
x (numpy.array): A length K (or K+1) array of estimated coefficients
"""
IK = I * K
Ninv = 1.0 / N
if issparse(X) :
X = X.tocoo()
Xnnz = X.nnz
else :
Xnnz = X.shape[0] * X.shape[1]
Nvars = K + 3 * IK + 4 * N + 1 # b, l, u, v, w, d, p, m, t
Ncons = N + K + IK # u + v + w ; d(1) + ... + d(I) ; d - p + m
Annz = 3 * N + 4 * IK # 3N ; IK ; 3IK
Ncone = N + 1 + 3 * IK + 6 * N # w, p, m in Pos ; (t,d) in SOC ; Exp vars
Gnnz = 1 + 5 * N + 3 * IK + 2 * Xnnz # N + 2IK ; 1 + IK ; 4N + 2Xnnz
# with these sizes, we can estimate the memory requirements...
# convenience list that lets us easily index sparse matrix terms
indices = np.arange( 0 , max(IK,2*N) , dtype=np.int )
# convenience values of "variable" starts and lengths in a
# concatenated Nvars-vector of all variables.
starts , length = {} , {}
starts['b'] , length['b'] = 0 , K
starts['l'] , length['l'] = starts['b'] + length['b'] , N
starts['u'] , length['u'] = starts['l'] + length['l'] , N
starts['v'] , length['v'] = starts['u'] + length['u'] , N
starts['w'] , length['w'] = starts['v'] + length['v'] , N
starts['d'] , length['d'] = starts['w'] + length['w'] , IK
starts['p'] , length['p'] = starts['d'] + length['d'] , IK
starts['m'] , length['m'] = starts['p'] + length['p'] , IK
starts['t'] , length['t'] = starts['m'] + length['m'] , 1
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# COST VECTOR (ie, objective)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
c = np.zeros( Nvars , dtype=np.float_ )
c[ starts['l'] : starts['l'] + length['l'] ] = Ninv
c[ starts['p'] : starts['p'] + length['p'] ] = Lambdas[0] * Ninv
c[ starts['m'] : starts['m'] + length['m'] ] = Lambdas[0] * Ninv
c[ starts['t'] ] = Lambdas[1] * Ninv / 2.0
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# LINEAR EQUALITY CONSTRAINTS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# u + v + w = 1 N rows 3N nonzeros (each of N rows has 3 terms)
# d(1) + ... + d(I) = 0 K rows IK nonzeros (each of K rows has I terms)
# d - p + m = 0 IK rows 3IK nonzeros (each of IK rows has 3 terms)
#
Arows = np.zeros( Annz , dtype=np.int )
Acols = np.zeros( Annz , dtype=np.int )
Adata = np.ones( Annz , dtype=np.float_ ) # almost all of the constraint data terms are "1"
j , jj = 0 , 0
# u + v + w
jj = j + N
Arows[j:jj] = indices[0:N]
Acols[j:jj] = starts['u'] + indices[0:N]
j = jj
jj = j + N
Arows[j:jj] = indices[0:N]
Acols[j:jj] = starts['v'] + indices[0:N]
j = jj
jj = j + N
Arows[j:jj] = indices[0:N]
Acols[j:jj] = starts['w'] + indices[0:N]
j = jj
# d(1) + ... + d(I), stored in d "K-first"
for k in range(0,K) :
jj = j + I
Arows[j:jj] = N + k
Acols[j:jj] = starts['d'] + k + K * indices[0:I]
j = jj
# d - p + m, noting that we have to set data for "p" terms as well as rows/cols
jj = j + IK
Arows[j:jj] = N+K+indices[0:IK]
Acols[j:jj] = starts['d'] + indices[0:IK]
j = jj
jj = j + IK
Arows[j:jj] = N+K+indices[0:IK]
Acols[j:jj] = starts['p'] + indices[0:IK]
Adata[j:jj] = -1.0
j = jj
jj = j + IK
Arows[j:jj] = N+K+indices[0:IK]
Acols[j:jj] = starts['m'] + indices[0:IK]
j = jj
A = csc_matrix( (Adata,(Arows,Acols)) , shape=(Ncons,Nvars) , dtype=np.float_ );
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # LINEAR EQUALITY CONSTRAINT RHS (explicitly assign whichever block has fewer terms)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if( N < IK + K ) :
b = np.zeros( Ncons , dtype=np.float_ )
b[0:N] = 1.0;
else :
b = np.ones( Ncons , dtype=np.float_ )
b[N:] = 0.0;
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# DIMENSIONS OF CONIC CONSTRAINTS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
dims = {
'l' : N + 2*IK , # w, p, and m must be non-negative
'q' : [ 1+IK ] , # (t,d) lie in the second-order cone
'e' : 2*N # 2 triplets of Exp cone variables for each n (3N "variables")
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CONIC CONSTRAINT RHS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
h = np.zeros( Ncone , dtype=np.float_ )
h[ dims['l'] + dims['q'][0] + 3*indices[0:2*N] + 2 ] = 1.0
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CONIC CONSTRAINTS MATRIX
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The first 1+N+3IK rows of G are easily described:
#
# b l u v w d p m t
#
# 0 0 0 0 -I 0 0 0 0 N rows, N non-zeros
# 0 0 0 0 0 0 -I 0 0 IK rows, IK non-zeros
# 0 0 0 0 0 0 0 -I 0 IK rows, IK non-zeros
# 0 0 0 0 0 0 0 0 -1 1 row, 1 non-zero
# 0 0 0 0 0 -I 0 0 0 IK rows, IK non-zeros
#
# This suggests initializing Gdata entries to -1 and filling in Grows
# and Gcols accordingly.
#
    # The remainder is not so easily described, but is very similar to
    # cases reviewed above. Particularly, for n = 0,...,N-1
    #
    #      G[ 1+N+3IK + 6n + 2 , : ] = G[ 1+N+3IK + 6n + 5 , : ] = 0'
    #
    # are empty rows, and we don't have to do anything. Also for n = 0,...,N-1
    #
    #      G[ 1+N+3IK + 6n + 0 , starts['l'] + n ] = 1
    #      G[ 1+N+3IK + 6n + 1 , starts['u'] + n ] = -1
    #      G[ 1+N+3IK + 6n + 3 , starts['l'] + n ] = 1
    #      G[ 1+N+3IK + 6n + 4 , starts['v'] + n ] = -1
    #
    # this is thus 4N non-zeros. Only b and d terms remain:
    #
    #      G[ 1+N+3IK + 6n + 0 , 0:K ] = - y[n] X[n,:]
    #      G[ 1+N+3IK + 6n + 0 , start[n] : start[n] + K ] = - y[n] X[n,:]
#
# where
#
# start[n] = starts['d'] + K*(i(n)-1)
#
# There are thus 2 "Xnnz" non-zeros here, where Xnnz is the number
# of non-zeros in the X matrix. This is the only part that requires changing
# data.
Grows = np.zeros( Gnnz , dtype=np.int )
Gcols = np.zeros( Gnnz , dtype=np.int )
Gdata = - np.ones( Gnnz , dtype=np.float_ )
j , jj , base = 0 , 0 , 0
# w, p, m terms are non-negative
jj = j + N
Grows[j:jj] = base + indices[0: N]
Gcols[j:jj] = starts['w'] + indices[0:length['w']]
j = jj
base += N
jj = j + IK
Grows[j:jj] = base + indices[0:IK]
Gcols[j:jj] = starts['p'] + indices[0:length['p']]
j = jj
base += IK
jj = j + IK
Grows[j:jj] = base + indices[0:IK]
Gcols[j:jj] = starts['m'] + indices[0:length['m']]
j = jj
base += IK
# t and d terms in the SOC
Grows[j] , Gcols[j] = base , starts['t'] ; j += 1
base += 1
jj = j + IK
Grows[j:jj] = base + indices[0:IK]
Gcols[j:jj] = starts['d'] + indices[0:length['d']]
j = jj
base += IK
# base is fixed now, because we intersperse the exponential cone terms
# u, v terms in Exp
jj = j + N
Grows[j:jj] = base + 6*indices[0:N] + 1
Gcols[j:jj] = starts['u'] + indices[0:length['u']]
j = jj
jj = j + N
Grows[j:jj] = base + 6*indices[0:N] + 4
Gcols[j:jj] = starts['v'] + indices[0:length['v']]
j = jj
# l terms in Exp
jj = j + N
Grows[j:jj] = base + 6*indices[0:N] + 0
Gcols[j:jj] = starts['l'] + indices[0:length['l']]
Gdata[j:jj] = 1.0
j = jj
jj = j + N
Grows[j:jj] = base + 6*indices[0:N] + 3
Gcols[j:jj] = starts['l'] + indices[0:length['l']]
Gdata[j:jj] = 1.0
j = jj
# b, d terms in Exp ** hardest part ** handle differently for sparse and dense X
if( issparse(X) ) :
jj = j + X.nnz
Grows[j:jj] = base + 6 * X.row
Gcols[j:jj] = starts['b'] + X.col
        Gdata[j:jj] = - y[X.row] * X.data    # same -y[n] X[n,:] data as the dense branch below
j = jj
jj = j + X.nnz
Grows[j:jj] = base + 6 * X.row
Gcols[j:jj] = starts['d'] + length['b'] * (ind[X.row]-1) + X.col
        Gdata[j:jj] = - y[X.row] * X.data    # same -y[n] X[n,:] data as the dense branch below
j = jj
else :
for n in range(0,N) :
data = - y[n] * X[n,:]
jj = j + length['b']
Grows[j:jj] = base + 6*n
Gcols[j:jj] = starts['b'] + indices[0:length['b']]
Gdata[j:jj] = data
j = jj
jj = j + length['b']
Grows[j:jj] = base + 6*n
Gcols[j:jj] = starts['d'] + length['b'] * (ind[n]-1) + indices[0:length['b']]
Gdata[j:jj] = data
j = jj
G = csc_matrix( (Gdata,(Grows,Gcols)) , shape=(Ncone,Nvars) , dtype=np.float_ )
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# OPTIONAL PRINTS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if 'start' in prints and prints['start'] :
print( "\nVariable Starts: \n" )
print( starts )
if 'costs' in prints and prints['costs'] :
print( "\nCosts: \n" )
for k in starts :
            if( np.max( np.abs( c[starts[k]:starts[k]+length[k]] ) ) == 0 ) :
                pass # print( "variable: %s is zero" % k )
            else :
                print( "variable: %s" % k )
                print( c[starts[k]:starts[k]+length[k]] )
# Author: <NAME>
# Date: February 8, 2021
# Purpose: Collection of custom functions for running CMA simulations of KdV,
# analysis functions, and renormalization scripts. Generates some images
# from past papers, but not all (didn't want / need to duplicate everything)
# Translation of code from UW PhD in Matlab.
# import libraries
import numpy as np
import scipy as sp
import scipy.integrate   # makes sp.integrate.solve_ivp available below
import scipy.stats       # makes sp.stats.linregress available below
import matplotlib.pyplot as plt
from matplotlib import animation
import glob
import re
def fftnorm(u_full):
"""Computes normalized FFT (such that FFT and IFFT are symmetrically normalized)
Parameters
----------
u_full : 1D Numpy Array (N,)
The vector whose discrete FFT is to be computed
Returns
-------
normalizedFFT : 1D Numpy Array (N,)
The transformed version of that vector
"""
N = u_full.shape[0]
normalizedFFT = np.fft.fft(u_full)*1/N
return normalizedFFT
def ifftnorm(u_full):
"""Computes normalized IFFT (such that FFT and IFFT are symmetrically normalized)
Parameters
----------
u_full : 1D Numpy Array (N,)
The vector whose discrete IFFT is to be computed
Returns
-------
normalizedIFFT : 1D Numpy Array (N,)
The transformed version of that vector
"""
N = u_full.shape[0]
normalizedIFFT = np.real(np.fft.ifft(u_full)*N)
return normalizedIFFT
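# A minimal sanity-check sketch for the symmetric normalization above: with
# fftnorm/ifftnorm as defined, transforming a real-valued grid function and
# transforming back should reproduce it to machine precision. The grid size
# and test function are arbitrary placeholders; defined as a function so it
# does not run on import.
def _example_fft_roundtrip():
    x = np.linspace(0, 2*np.pi, 64, endpoint=False)
    u = np.sin(x) + 0.5*np.cos(3*x)
    uHat = fftnorm(u)                    # forward transform, scaled by 1/N
    uBack = ifftnorm(uHat)               # inverse transform, scaled by N
    return np.max(np.abs(u - uBack))     # expected to be ~1e-15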
def convolutionSumKdV(u,v,alpha):
"""Computes convolution sum associated with RHS of KdV ODE
C_k(u,v) = -(alpha * 1i * k) / 2 * sum_{i+j = k} u_i v_j
Computed in real space to avoid loops and then converted back
to Fourier space.
Parameters
----------
u : 1D Numpy Array (N,)
One of the two vectors being convolved
v : 1D Numpy Array (N,)
One of the two vectors being convolved
alpha : float
Degree of nonlinearity in KdV
Returns
-------
convo : 1D Numpy Array (N,)
Convolution of the two vectors
"""
# generate array of wavenumbers
L = u.shape[0]
k = np.concatenate([np.arange(0,L/2),np.arange(-L/2,0)])
    if v.shape[0]!=L:
        raise ValueError('u and v must be the same length.')
# compute double sum in real space, then apply scalar multiplier
convo = fftnorm(ifftnorm(u)*ifftnorm(v))
convo = -alpha/2*1j*k*convo
return convo
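# Hedged cross-check of convolutionSumKdV against the direct double sum
#     C_k(u,v) = -(alpha * 1i * k) / 2 * sum_{i+j = k} u_i v_j
# on a tiny system, where i+j = k is understood modulo the array length (the
# circular convolution computed by the FFT). u and v are built as transforms of
# real fields so the real-space product used above is exact. The size and seed
# are placeholders; defined as a function so it does not run on import.
def _example_convolution_check(L=8, alpha=1.0):
    rng = np.random.default_rng(1)
    u = fftnorm(rng.normal(size=L))
    v = fftnorm(rng.normal(size=L))
    k = np.concatenate([np.arange(0, L/2), np.arange(-L/2, 0)])
    direct = np.zeros(L, dtype=complex)
    for kk in range(L):                        # loop over output wavenumber index
        for i in range(L):                     # accumulate u_i * v_{k-i} (indices mod L)
            direct[kk] += u[i] * v[(kk - i) % L]
    direct = -alpha/2*1j*k*direct
    fast = convolutionSumKdV(u, v, alpha)
    return np.max(np.abs(direct - fast))       # expected to be near machine precision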
# RHS: Right hand side functions for CMA and non-renormalized KdV
def markovKdV(u,M,alpha):
"""Computes nonlinear part of Markov term in KdV
C_k(u,v) = -(alpha * 1i * k) / 2 * sum_{i+j = k} u_i v_j
where the sum of i and j is over a "full" system with M positive modes (user specified)
Computed in real space to avoid loops and then converted back
to Fourier space.
Parameters
----------
u : 1D Numpy Array (N,)
Positive modes of state vector whose RHS is being computed
M : int
Number of positive modes in "full" model for intermediary calculations
alpha : float
Degree of nonlinearity in KdV
Returns
-------
nonlin0 : 1D Numpy Array (2*M,)
Nonlinear part of Markov term for given state vector
u_full : 1D Numpy array (2*M,)
"full" state vector for use in later computations
"""
# construct full Fourier vector from only the positive modes
u_full = np.zeros(2*M) +1j*np.zeros(2*M)
u_full[0:u.shape[0]] = u
u_full[2*M-u.shape[0]+1:] = np.conj(np.flip(u[1:]))
# compute the convolution sum
nonlin0 = convolutionSumKdV(u_full,u_full,alpha)
return nonlin0,u_full
def tModelKdV(u_full,nonlin0,alpha,F_modes):
"""Computes t-model term in KdV
C_k(u,v) = -(alpha * 1i * k) / 2 * sum_{i+j = k, i and j in F} u_i v_j
where the sum of i and j is over a "full" system with M positive modes (user specified)
Computed in real space to avoid loops and then converted back
to Fourier space.
Parameters
----------
u_full : Numpy array (2M,1)
Current state of u in full form
nonlin0 : Numpy array (2M,1)
Markov term (for convolving)
alpha : float
Degree of nonlinearity in KdV
F_modes : Numpy array
Set of resolved modes (and aliasing modes) to zero out
Returns
-------
nonlin1 : 1D Numpy Array (2*M,)
t-model term
uuStar : 1D Numpy array (2*M,)
unresolved modes of state vector convolved with itself
"""
uuStar = np.copy(nonlin0)
uuStar[F_modes] = 0
nonlin1 = 2*convolutionSumKdV(u_full, uuStar, alpha)
return nonlin1,uuStar
def t2ModelKdV(u_full,nonlin0,uuStar,alpha,F_modes,G_modes,k,epsilon):
"""Computes second order ROM term in KdV
*see paper / symbolic notebook for expression*
Computed in real space to avoid loops and then converted back
to Fourier space.
Parameters
----------
u_full : Numpy array (2M,1)
Current state of u in full form
nonlin0 : Numpy array (2M,1)
Markov term (for convolving)
uuStar : 1D Numpy array (2*M,)
Unresolved modes of state vector convolved with itself
alpha : float
Degree of nonlinearity in KdV
F_modes : Numpy array
Set of resolved modes (and aliasing modes) to zero out
G_modes : Numpy array
Set of unresolved modes (and aliasing modes) to zero out
k : Numpy array (2M,1)
Array of wavenumbers
epsilon : float
Size of linear term (stiffness)
Returns
-------
nonlin2 : 1D Numpy Array (2*M,)
t2-model term
uk3 : 1D Numpy array (2*M,)
Resolved modes of state vector multiplied by k^3
uu : 1D Numpy array (2*M,)
Resolved modes of state vector convolved with itself
A, AStar, B, BStar, C, CStar, D, DStar : 1D Numpy arrays (2*M,)
Specific convolutions used as inner terms in future terms
"""
# compute inner convolutions
uu = np.copy(nonlin0)
uu[G_modes] = 0
uk3 = k**3*u_full
A = k**3*uu
AStar = k**3*uuStar
B = convolutionSumKdV(1j*epsilon**2*uk3+uu,u_full,alpha)
BStar = np.copy(B)
B[G_modes] = 0
BStar[F_modes] = 0
C = convolutionSumKdV(uuStar,u_full,alpha)
CStar = np.copy(C)
C[G_modes] = 0
CStar[F_modes] = 0
D = convolutionSumKdV(uuStar,uuStar,alpha)
DStar = np.copy(D)
D[G_modes] = 0
DStar[F_modes] = 0
# compute actual term
nonlin2 = -2*convolutionSumKdV(u_full,1j*epsilon**2*AStar - 2*BStar + 2*CStar,alpha) - 2*D
return nonlin2,uk3,uu,A,AStar,B,BStar,C,CStar,D,DStar
def t3ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,A,AStar,B,BStar,C,CStar,DStar):
"""Computes third order ROM term in KdV
*see paper / symbolic notebook for expression*
Computed in real space to avoid loops and then converted back
to Fourier space.
Parameters
----------
alpha : float
Degree of nonlinearity in KdV
F_modes : Numpy array
Set of resolved modes (and aliasing modes) to zero out
G_modes : Numpy array
Set of unresolved modes (and aliasing modes) to zero out
k : Numpy array (2M,1)
Array of wavenumbers
epsilon : float
Size of linear term (stiffness)
u_full : Numpy array (2M,1)
Current state of u in full form
uu : 1D Numpy array (2*M,)
Resolved modes of state vector convolved with itself
uuStar : 1D Numpy array (2*M,)
Unresolved modes of state vector convolved with itself
uk3 : 1D Numpy array (2*M,)
Resolved modes of state vector multiplied by k^3
A, AStar, B, BStar, C, CStar, DStar : 1D Numpy arrays (2*M,)
Specific convolutions used as inner terms in future terms
Returns
-------
nonlin3 : 1D Numpy Array (2*M,)
t3-model term
uk6 : 1D Numpy array (2*M,)
Resolved modes of state vector multiplied by k^6
nonlin3,uk6,E,EStar,F,FStar
E, EStar, F, FStar : 1D Numpy arrays (2*M,)
Specific convolutions used as inner terms in future terms
"""
# compute internal convolutions
uk6 = k**3*uk3
E = convolutionSumKdV(1j*epsilon**2*uk3+uu,1j*epsilon**2*uk3+uu,alpha)
EStar = np.copy(E)
E[G_modes] = 0
EStar[F_modes] = 0
F = convolutionSumKdV(uuStar,1j*epsilon**2*uk3+uu,alpha)
FStar = np.copy(F)
F[G_modes] = 0
FStar[F_modes] = 0
int1 = -2*BStar+CStar
int2 = (convolutionSumKdV(u_full,
-epsilon**4*uk6
+1j*epsilon**2*(A+AStar)
+2*(B-2*C)
+2*(CStar-2*BStar),
alpha))
int2[F_modes] = 0
int3 = EStar-FStar
int4 = np.copy(DStar)
int5 = CStar-BStar
# compute actual 3rd order term
nonlin3 = (2*convolutionSumKdV(u_full,-k**3*epsilon**4*AStar
+2*1j*epsilon**2*k**3*int1
+2*int2+2*int3+2*int4,alpha)
+6*convolutionSumKdV(uuStar,1j*epsilon**2*AStar + 2*int5,alpha))
return nonlin3,uk6,E,EStar,F,FStar
def t4ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,uk6,A,AStar,B,BStar,C,CStar,D,DStar,E,EStar,F,FStar):
"""Computes fourth order ROM term in KdV
*see paper / symbolic notebook for expression*
Computed in real space to avoid loops and then converted back
to Fourier space.
Parameters
----------
alpha : float
Degree of nonlinearity in KdV
F_modes : Numpy array
Set of resolved modes (and aliasing modes) to zero out
G_modes : Numpy array
Set of unresolved modes (and aliasing modes) to zero out
k : Numpy array (2M,1)
Array of wavenumbers
epsilon : float
Size of linear term (stiffness)
u_full : Numpy array (2M,1)
Current state of u in full form
uu : 1D Numpy array (2*M,)
Resolved modes of state vector convolved with itself
uuStar : 1D Numpy array (2*M,)
Unresolved modes of state vector convolved with itself
uk3 : 1D Numpy array (2*M,)
Resolved modes of state vector multiplied by k^3
uk6 : 1D Numpy array (2*M,)
Resolved modes of state vector multiplied by k^6
A, AStar, B, BStar, C, CStar, DStar, E, EStar, F, FStar : 1D Numpy arrays (2*M,)
Specific convolutions used as inner terms in future terms
Returns
-------
nonlin4 : 1D Numpy Array (2*M,)
t4-model term
"""
# compute internal convolutions
internal1 = (convolutionSumKdV(u_full,-epsilon**4*uk6+1j*epsilon**2*(A+AStar)
+2*B-4*C-4*BStar+2*CStar,alpha))
internal1[F_modes] = 0
internal2 = (1j*epsilon**2*k**3*convolutionSumKdV(u_full,-3*epsilon**4*uk6
+1j*epsilon**2*(3*A+AStar)
-2*(-3*B+5*C)
+2*(-3*BStar+CStar),alpha))
internal2[F_modes] = 0
auxiliary1 = 2*convolutionSumKdV(u_full,epsilon**4*uk6-1j*epsilon**2*(A+3*AStar)
+2*(3*C-B)+2*(5*BStar-3*CStar),alpha)
auxiliary1[G_modes] = 0
auxiliary2 = 2*convolutionSumKdV(u_full,-3*epsilon**4*uk6+1j*epsilon**2*(3*A+AStar)
+2*(3*B-5*C)+2*(-3*BStar+CStar),alpha)
auxiliary2[F_modes] = 0
internal3 = convolutionSumKdV(u_full,1j*k**3*uk6*epsilon**6
+k**3*epsilon**4*(A-AStar)
+2*1j*epsilon**2*k**3*(3*C-B)
+2*1j*epsilon**2*k**3*(-3*BStar+CStar)
+auxiliary1+auxiliary2
-2*(E-2*F)
+2*(3*EStar-2*FStar)
-6*D+2*DStar,alpha)
internal3[F_modes]= 0
internal4 = convolutionSumKdV(1j*epsilon**2*uk3+uu,3*epsilon**4*uk6-1j*epsilon**2*(3*A+AStar)
+2*(-3*B+5*C)+2*(3*BStar-CStar),alpha)
internal4[F_modes] = 0
internal5 = convolutionSumKdV(uuStar,-epsilon**4*uk6+1j*epsilon**2*(A+3*AStar)
+2*B-6*C-10*BStar+6*CStar,alpha)
internal5[F_modes] = 0
# compute actual fourth order term
nonlin4 = (2*convolutionSumKdV(u_full,-1j*epsilon**6*k**6*AStar
+2*k**6*epsilon**4*(3*BStar-CStar)
+2*internal2
+2*internal3
+2*internal4
-2*k**3*1j*epsilon**2*(2*FStar-3*EStar)
+2*k**3*1j*epsilon**2*DStar
+2*internal5,alpha)
+8*convolutionSumKdV(uuStar,-k**3*epsilon**4*AStar
+2*1j*epsilon**2*k**3*(-2*BStar+CStar)
+2*internal1
+2*(EStar-FStar)
+2*DStar,alpha)
-48*convolutionSumKdV(BStar,1j*epsilon**2*AStar+2*CStar,alpha)
+6*convolutionSumKdV(1j*epsilon**2*AStar+2*(BStar+CStar),
1j*epsilon**2*AStar+2*(BStar+CStar),alpha)
)
nonlin4 = -nonlin4
return nonlin4
def RHSKdV(t,u,params):
"""
Computes the RHS for a full KdV or ROM simulation. For use in solver.
Parameters
----------
t : float
Current time
u : Numpy array (N,)
Current state vector
params : Dictionary
Dictionary of relevant parameters (see below)
            N : int, number of positive modes in simulation
            M : int, number of positive modes in "full" intermediate computation
alpha : float, degree of nonlinearity in KdV
epsilon : float, size of linear term (stiffness)
tau : float, time decay modifier
coeffs : Numpy array, renormalization coefficients for ROM (None if no ROM)
Returns
-------
RHS : 1D Numpy array (N,)
Derivative of each positive mode in state vector
"""
# extract parameters from dictionary
N = params['N']
M = params['M']
alpha = params['alpha']
epsilon = params['epsilon']
tau = params['tau']
coeffs = params['coeffs']
# construct wavenumber array
k = np.concatenate([np.arange(0,M),np.arange(-M,0)])
# Linear and Markov term
nonlin0,u_full = markovKdV(u,M,alpha)
RHS = 1j*k[0:N]**3*epsilon**2*u + nonlin0[0:N]
    if coeffs is None:
order = 0
else:
order = coeffs.shape[0]
if (order >= 1):
# define which modes are resolved / unresolved in full array
F_modes = np.concatenate([np.arange(0,N),np.arange(2*N-1,M+N+2),np.arange(2*M-N+1,2*M)])
G_modes = np.arange(N,2*M-N+1)
# compute t-model term
nonlin1,uuStar = tModelKdV(u_full,nonlin0,alpha,F_modes)
RHS = RHS + coeffs[0]*nonlin1[0:N]*t**(1-tau)
order = coeffs.shape[0]
if (order >= 2):
# compute t2-model term
nonlin2,uk3,uu,A,AStar,B,BStar,C,CStar,D,DStar = t2ModelKdV(u_full,nonlin0,uuStar,alpha,F_modes,G_modes,k,epsilon)
RHS = RHS + coeffs[1]*nonlin2[0:N]*t**(2*(1-tau))
if (order >= 3):
# compute t3-model term
nonlin3,uk6,E,EStar,F,FStar = t3ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,A,AStar,B,BStar,C,CStar,DStar)
RHS = RHS + coeffs[2]*nonlin3[0:N]*t**(3*(1-tau))
if (order == 4):
# compute t4-model term
nonlin4 = t4ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,uk6,A,AStar,B,BStar,C,CStar,D,DStar,E,EStar,F,FStar)
RHS = RHS + coeffs[3]*nonlin4[0:N]*t**(4*(1-tau))
return RHS
def getMass(u,N):
"""Computes mass in first N modes for all timesteps from solution array u
Parameters
----------
u : 2D Numpy Array (M,tList)
Positive modes of state vector for all timesteps
N : int
Number of positive modes to include in mass measurement
Returns
-------
mass : 1D Numpy Array (tList,)
        Mass contained in the first N modes at each timestep
"""
mass = np.sum(2*(abs(u[0:N,]))**2,0)
return mass
def runSim(params):
"""
Runs an actual ROM or non-ROM simulation of KdV
Parameters
----------
params : Dictionary
Dictionary of relevant parameters (see below)
            N : int, number of positive modes in simulation
            M : int, number of positive modes in "full" intermediate computation
alpha : float, degree of nonlinearity in KdV
epsilon : float, size of linear term (stiffness)
tau : float, time decay modifier
coeffs : Numpy array, renormalization coefficients for ROM (None if no ROM)
IC : function handle, initial condition of simulation
endtime : float, final time to simulate to
timesteps: Numpy array, specific timesteps for which to save solution
Returns
-------
uSim : ODE solver output
Output solution from sp.integrate.solve_ivp (includes state vector at all timesteps, time vector, etc.)
"""
# unpack parameters from dictionary
N = params['N']
IC = params['IC']
endtime = params['endtime']
timesteps = params['timesteps']
# generate initial condition
x = np.linspace(0,2*np.pi-2*np.pi/(2*N),2*N)
y = IC(x)
uFull = fftnorm(y)
u = uFull[0:N]
# define RHS in form appropriate for solve_ivp
def myRHS(t,y):
out = RHSKdV(t,y,params)
return out
# solve the IVP
uSim = sp.integrate.solve_ivp(fun = myRHS, t_span = [0,endtime], y0 = u,method = "BDF", t_eval = timesteps)
return uSim
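# Hypothetical driver sketch for runSim: a short, non-renormalized (Markov-only)
# KdV run using the parameter dictionary documented above. The resolution,
# epsilon, endtime, and timesteps below are illustrative placeholders only.
# Defined as a function so it does not run on import.
def _example_runSim():
    params = {
        'N': 16,                               # resolved positive modes
        'M': 24,                               # intermediate "full" modes (3/2 * N)
        'alpha': 1.0,
        'epsilon': 0.1,
        'tau': 1.0,
        'coeffs': None,                        # None -> no ROM terms (Markov model only)
        'IC': np.sin,
        'endtime': 1.0,
        'timesteps': np.linspace(0, 1.0, 11)
    }
    uSim = runSim(params)
    return uSim.t, uSim.y                      # times and positive-mode trajectories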
def makeRealSpace(u,N):
"""Takes a completed simulation and finds the real space solution at all timesteps for a chosen subset of modes
Parameters
----------
u : Numpy array (M,t)
Output of simulation giving energy in first M positive modes for all timesteps t
N : int
Number of positive modes to use in real space
Returns
-------
uReal : Numpy array (2xN,t)
Real space solution at all times
"""
# identify shapes of arrays
uShape = u.shape
numTimes = uShape[1]
# drop modes we don't wish to keep
uNew = u[0:N,:]
# generate full vector (with negative modes)
uFull = np.zeros((2*N,numTimes)) + 1j*0
uFull[0:N,:] = uNew
uFull[2*N-N+1:,:] = np.conj(np.flip(uNew[1:,:],0))
# initialize output
uReal = np.zeros(uFull.shape)
# take inverse transform for each timestep
# NOTE: is there a vectorized way to do this?
for i in np.arange(0,numTimes):
uReal[:,i] = ifftnorm(uFull[:,i])
return uReal
def makeAnimations(uList,t,legendList):
"""
Creates an animation from a list of simulations
Parameters
----------
uList : List of Numpy arrays of size (N,T)
Set of state vector evolutions to animate
t : Numpy array (T,)
Timesteps associated with simulations (must all be the same)
legendList : List of strings
Labels for each simulation
Returns
-------
anim : animation object
output from animation.FuncAnimation
"""
# identify the resolution to use for plots and generate x grid
N = min([x.shape[0] for x in uList])
xgrid = np.linspace(0,2*np.pi*(2*N-1)/(2*N),2*N)
# generate real space solutions
realSols = [makeRealSpace(x,N) for x in uList]
# initialize figure
myFig = plt.figure()
ax = plt.subplot()
ax.axis(xmin = 0,xmax = 2*np.pi-np.pi/N,ymin = -2, ymax = 4)
# create empty list of lines to populate each iteration
lineList = [ax.plot([],[]) for i in range(len(uList))]
# define function to draw each frame
def makeFrame(n):
for i in range(len(uList)):
lineList[i][0].set_data(xgrid,realSols[i][:,n])
plt.title('t = '+str(round(t[n],1)))
plt.legend(legendList, loc = "upper right")
return lineList
# generate animation
anim = animation.FuncAnimation(fig = myFig,func = makeFrame,frames = t.shape[0])
return anim
def renormalize(fullM, endtime, Nlist, Mlist, epsilon, alpha, tau, timesteps, IC = np.sin, plots = False):
"""
Finds renormalization coefficients based on a single simulation. If the
simulation doesn't yet exist, it creates it
Parameters
----------
fullM : int
Size of full simulation to base fits on
endtime : int
Endtime of full simulation
Nlist : list of ints
List of resolutions for which to find coefficients
Mlist : list of ints
List of intermediary "full" simulations to use for ROMs
epsilon : float
size of linear term (stiffness)
alpha : float
degree of nonlinearity in KdV
tau : float
time decay modifier
timesteps : Numpy array
specific timesteps for which to save solution
IC : function handle
initial condition of simulation (default np.sin)
plots : boolean
Indicates whether to generate plots (default: False)
Returns
-------
    coeffsArray1 : Numpy array (length(Nlist),1)
Renormalization coefficients for t-model only
coeffsArray2 : Numpy array (length(Nlist),2)
Renormalization coefficients for t-model and t2-model only
coeffsArray3 : Numpy array (length(Nlist),3)
Renormalization coefficients for t1-t3-models
coeffsArray4 : Numpy array (length(Nlist),4)
Renormalization coefficients for t1-t4-models
coeffsArray2only : Numpy array (length(Nlist),1)
Renormalization coefficients for t2-model only
coeffsArray24only : Numpy array (length(Nlist),2)
Renormalization coefficients for t2-model and t4-model only
fitLines : Dict
        Contains scaling law fits for each ROM coefficient
of form c = -b * N^a
Terms given are a, b, and r (correlation coefficient of fit)
err : Dict
Contains least-squares error for each fit for each model and resolution
"""
# Check if full simulation has already been constructed
# if so, load it, if not, generate it
try:
uFull = np.load("u" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+".npy")
tFull = np.load("t" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+".npy")
    except FileNotFoundError:
fullParams = {
'N': fullM,
'M': int(3/2*fullM),
'alpha': 1,
'epsilon': epsilon,
'tau': 1,
'coeffs': None,
'IC': IC,
'endtime': endtime,
'timesteps': timesteps
}
uSimFull = runSim(fullParams)
uFull = uSimFull.y
tFull = uSimFull.t
np.save( "u" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p'),uFull)
np.save( "t" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p'),tFull)
# initialize output arrays
coeffsArray1 = np.zeros((Nlist.shape[0],1))
coeffsArray2 = np.zeros((Nlist.shape[0],2))
coeffsArray3 = np.zeros((Nlist.shape[0],3))
coeffsArray4 = np.zeros((Nlist.shape[0],4))
coeffsArray2only = np.zeros((Nlist.shape[0],1))
coeffsArray24only = np.zeros((Nlist.shape[0],2))
# recover number of timesteps
numSteps = tFull.shape[0]
# initialize least squares error output
err = {"t-model" : np.zeros((Nlist.shape[0],1)),
"t2-model" : np.zeros((Nlist.shape[0],1)),
"t3-model" : np.zeros((Nlist.shape[0],1)),
"t4-model" : np.zeros((Nlist.shape[0],1)),
"t2-model only" : np.zeros((Nlist.shape[0],1)),
"t2- and t4-models" : np.zeros((Nlist.shape[0],1))}
# loop through all resolutions
for j in np.arange(0,Nlist.shape[0]):
# Find number of positive terms in ROM, in intermediate calculations, and wavenumber array
N = Nlist[j]
M = Mlist[j]
k = np.concatenate([np.arange(0,M),np.arange(-M,0)])
# Gather first derivative data for fitting purposes
exactEnergy = np.zeros((N,numSteps))
R0Energy = np.zeros((N,numSteps))
R1Energy = np.zeros((N,numSteps))
R2Energy = np.zeros((N,numSteps))
R3Energy = np.zeros((N,numSteps))
R4Energy = np.zeros((N,numSteps))
# plug exact solution into exact RHS and all ROM terms and find energy contribution of each
for i in np.arange(0,numSteps):
# exact RHS
exactRHS,dummyU = markovKdV(uFull[:,i],int(fullM*3/2),alpha)
exactEnergy[:,i] = np.real(exactRHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(exactRHS[0:N])*uFull[0:N,i])
# Markov RHS
nonlin0,u_full = markovKdV(uFull[0:N,i],M,alpha)
R0RHS = nonlin0
R0Energy[:,i] = np.real(R0RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R0RHS[0:N])*uFull[0:N,i])
# First order RHS term
F_modes = np.concatenate([np.arange(0,N),np.arange(2*N-1,M+N+2),np.arange(2*M-N+1,2*M)])
G_modes = np.arange(N,2*M-N+1)
nonlin1,uuStar = tModelKdV(u_full,nonlin0,alpha,F_modes)
R1RHS = nonlin1*tFull[i]**(1-tau)
R1Energy[:,i] = np.real(R1RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R1RHS[0:N])*uFull[0:N,i])
# Second order RHS term
nonlin2,uk3,uu,A,AStar,B,BStar,C,CStar,D,DStar = t2ModelKdV(u_full,nonlin0,uuStar,alpha,F_modes,G_modes,k,epsilon)
R2RHS = nonlin2*tFull[i]**(2*(1-tau))
R2Energy[:,i] = np.real(R2RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R2RHS[0:N])*uFull[0:N,i])
# Third order RHS term
nonlin3,uk6,E,EStar,F,FStar = t3ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,A,AStar,B,BStar,C,CStar,DStar)
R3RHS = nonlin3*tFull[i]**(3*(1-tau))
R3Energy[:,i] = np.real(R3RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R3RHS[0:N])*uFull[0:N,i])
# Fourth order RHS term
nonlin4 = t4ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,uk6,A,AStar,B,BStar,C,CStar,D,DStar,E,EStar,F,FStar)
R4RHS = nonlin4*tFull[i]**(4*(1-tau))
R4Energy[:,i] = np.real(R4RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R4RHS[0:N])*uFull[0:N,i])
if j == 0:
R0Energy0 = np.copy(R0Energy)
R1Energy0 = np.copy(R1Energy)
R2Energy0 = np.copy(R2Energy)
R3Energy0 = np.copy(R3Energy)
R4Energy0 = np.copy(R4Energy)
##################################################
# Use least-squares fit to identify coefficients #
##################################################
# t-model coefficient
coeffsArray1[j,:] = np.sum((exactEnergy - R0Energy)*R1Energy)/np.sum(R1Energy*R1Energy)
err["t-model"][j] = np.sum((exactEnergy - R0Energy - coeffsArray1[j,0]*R1Energy)**2)
# t2-model coefficient
LSMatrix = (np.array([[np.sum(R1Energy*R1Energy),np.sum(R1Energy*R2Energy)],
[np.sum(R2Energy*R1Energy),np.sum(R2Energy*R2Energy)]]))
LSb = (np.array([np.sum(R1Energy*(exactEnergy-R0Energy)),np.sum(R2Energy*(exactEnergy-R0Energy))]))
coeffsArray2[j,:] = np.linalg.solve(LSMatrix,LSb)
err["t2-model"][j] = np.sum((exactEnergy - R0Energy - coeffsArray2[j,0]*R1Energy - coeffsArray2[j,1]*R2Energy)**2)
# t3-model coefficient
LSMatrix = (np.array([[np.sum(R1Energy*R1Energy),np.sum(R1Energy*R2Energy),np.sum(R1Energy*R3Energy)],
[np.sum(R2Energy*R1Energy),np.sum(R2Energy*R2Energy),np.sum(R2Energy*R3Energy)],
[np.sum(R3Energy*R1Energy),np.sum(R3Energy*R2Energy),np.sum(R3Energy*R3Energy)]]))
LSb = (np.array([np.sum(R1Energy*(exactEnergy-R0Energy)),np.sum(R2Energy*(exactEnergy-R0Energy)),np.sum(R3Energy*(exactEnergy-R0Energy))]))
coeffsArray3[j,:] = np.linalg.solve(LSMatrix,LSb)
err["t3-model"][j] = np.sum((exactEnergy - R0Energy - coeffsArray3[j,0]*R1Energy - coeffsArray3[j,1]*R2Energy - coeffsArray3[j,2]*R3Energy)**2)
# t4-model coefficient
LSMatrix = (np.array([[np.sum(R1Energy*R1Energy),np.sum(R1Energy*R2Energy),np.sum(R1Energy*R3Energy),np.sum(R1Energy*R4Energy)],
[np.sum(R2Energy*R1Energy),np.sum(R2Energy*R2Energy),np.sum(R2Energy*R3Energy),np.sum(R2Energy*R4Energy)],
[np.sum(R3Energy*R1Energy),np.sum(R3Energy*R2Energy),np.sum(R3Energy*R3Energy),np.sum(R3Energy*R4Energy)],
[np.sum(R4Energy*R1Energy),np.sum(R4Energy*R2Energy),np.sum(R4Energy*R3Energy),np.sum(R4Energy*R4Energy)]]))
LSb = (np.array([np.sum(R1Energy*(exactEnergy-R0Energy)),np.sum(R2Energy*(exactEnergy-R0Energy)),np.sum(R3Energy*(exactEnergy-R0Energy)),np.sum(R4Energy*(exactEnergy-R0Energy))]))
coeffsArray4[j,:] = np.linalg.solve(LSMatrix,LSb)
err["t4-model"][j] = np.sum((exactEnergy - R0Energy - coeffsArray4[j,0]*R1Energy - coeffsArray4[j,1]*R2Energy - coeffsArray4[j,2]*R3Energy - coeffsArray4[j,3]*R4Energy)**2)
# t2-model with *no* t-model
coeffsArray2only[j,:] = np.sum((exactEnergy - R0Energy)*R2Energy)/np.sum(R2Energy*R2Energy)
err["t2-model only"][j] = np.sum((exactEnergy - R0Energy - coeffsArray2only[j,0]*R2Energy)**2)
# t2-model and t4-model with *no* t-model or t3-model
LSMatrix = (np.array([[np.sum(R2Energy*R2Energy),np.sum(R2Energy*R4Energy)],
[np.sum(R4Energy*R2Energy),np.sum(R4Energy*R4Energy)]]))
LSb = (np.array([np.sum(R2Energy*(exactEnergy-R0Energy)),np.sum(R4Energy*(exactEnergy-R0Energy))]))
coeffsArray24only[j,:] = np.linalg.solve(LSMatrix,LSb)
err["t2- and t4-models"][j] = np.sum((exactEnergy - R0Energy - coeffsArray24only[j,0]*R2Energy - coeffsArray24only[j,1]*R4Energy)**2)
# Generate plots if desired
if plots:
# Plot 1: Qualitative comparison of each term contributing to energy movement
N = Nlist[0]
fig1, ax1 = plt.subplots(3,2)
ax1[0,0].plot(tFull,np.sum(exactEnergy[0:N,:],0))
ax1[0,0].set_title("Exact Energy Decay")
ax1[0,1].plot(tFull,np.sum(R0Energy0[0:N,:],0))
ax1[0,1].set_title("Markov Energy Decay")
ax1[1,0].plot(tFull,np.sum(R2Energy0[0:N,:],0))
ax1[1,0].set_title("R2 Energy Decay")
ax1[1,1].plot(tFull,np.sum(R1Energy0[0:N,:],0))
ax1[1,1].set_title("R1 Energy Decay")
ax1[2,0].plot(tFull,np.sum(R4Energy0[0:N,:],0))
ax1[2,0].set_title("R4 Energy Decay")
ax1[2,1].plot(tFull,np.sum(R3Energy0[0:N,:],0))
ax1[2,1].set_title("R3 Energy Decay")
fig1.suptitle("N = "+str(N)+" Energy Decays")
plt.tight_layout()
# remove axis labels to not crowd plots (since only qualitative comparisons desired)
for i in range(0,3):
for j in range(0,2):
#ax1[i,j].tick_params(labelbottom=False,labelleft=False)
ax1[i,j].tick_params(labelleft=False)
# compute best fit lines for coefficients in log-log space
fitLines = {"t-model" : np.zeros((1,3)),
"t2-model" : np.zeros((2,3)),
"t3-model" : np.zeros((3,3)),
"t4-model" : np.zeros((4,3)),
"t2-model only" : np.zeros((1,3)),
"t2- and t4-models" : np.zeros((2,3))}
fig2, ax2 = plt.subplots(2,2)
# t-model
ax2[0,0].scatter(np.log(Nlist),np.log(abs(coeffsArray1[:,0])))
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray1[:,0])))
ax2[0,0].plot(np.log(Nlist),intercept + slope*np.log(Nlist))
fitLines["t-model"][:] = np.array([slope,np.exp(intercept),r_value])
# t2-model
ax2[0,0].scatter(np.log(Nlist),np.log(abs(coeffsArray2[:,0])),color="red")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray2[:,0])))
ax2[0,0].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "red")
fitLines["t2-model"][0,:] = np.array([slope,np.exp(intercept),r_value])
ax2[0,1].scatter(np.log(Nlist),np.log(abs(coeffsArray2[:,1])),color="red")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray2[:,1])))
ax2[0,1].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "red")
fitLines["t2-model"][1,:] = np.array([slope,np.exp(intercept),r_value])
# t3-model
ax2[0,0].scatter(np.log(Nlist),np.log(abs(coeffsArray3[:,0])),color="green")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray3[:,0])))
ax2[0,0].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "green")
fitLines["t3-model"][0,:] = np.array([slope,np.exp(intercept),r_value])
ax2[0,1].scatter(np.log(Nlist),np.log(abs(coeffsArray3[:,1])),color="green")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray3[:,1])))
ax2[0,1].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "green")
fitLines["t3-model"][1,:] = np.array([slope,np.exp(intercept),r_value])
ax2[1,0].scatter(np.log(Nlist),np.log(abs(coeffsArray3[:,2])),color="green")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray3[:,2])))
ax2[1,0].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "green")
fitLines["t3-model"][2,:] = np.array([slope,np.exp(intercept),r_value])
# t4-model
ax2[0,0].scatter(np.log(Nlist),np.log(abs(coeffsArray4[:,0])),color="purple")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,0])))
ax2[0,0].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "purple")
fitLines["t4-model"][0,:] = np.array([slope,np.exp(intercept),r_value])
ax2[0,1].scatter(np.log(Nlist),np.log(abs(coeffsArray4[:,1])),color="purple")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,1])))
ax2[0,1].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "purple")
fitLines["t4-model"][1,:] = np.array([slope,np.exp(intercept),r_value])
ax2[1,0].scatter(np.log(Nlist),np.log(abs(coeffsArray4[:,2])),color="purple")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,2])))
ax2[1,0].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "purple")
fitLines["t4-model"][2,:] = np.array([slope,np.exp(intercept),r_value])
ax2[1,1].scatter(np.log(Nlist),np.log(abs(coeffsArray4[:,3])),color="purple")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,3])))
ax2[1,1].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "purple")
fitLines["t4-model"][3,:] = np.array([slope,np.exp(intercept),r_value])
# t2-model alone
ax2[0,1].scatter(np.log(Nlist),np.log(abs(coeffsArray2only[:,0])),color="cyan")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray2only[:,0])))
ax2[0,1].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "cyan")
fitLines["t2-model only"][:] = np.array([slope,np.exp(intercept),r_value])
# t2- and t4-model alone
ax2[0,1].scatter(np.log(Nlist),np.log(abs(coeffsArray24only[:,0])),color="black")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray24only[:,0])))
ax2[0,1].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "black")
fitLines["t2- and t4-models"][0,:] = np.array([slope,np.exp(intercept),r_value])
ax2[1,1].scatter(np.log(Nlist),np.log(abs(coeffsArray24only[:,1])),color="black")
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray24only[:,1])))
ax2[1,1].plot(np.log(Nlist),intercept + slope*np.log(Nlist), color = "black")
fitLines["t2- and t4-models"][1,:] = np.array([slope,np.exp(intercept),r_value])
ax2[0,0].set_title("t-model")
ax2[0,1].set_title("t2-model")
ax2[1,0].set_title("t3-model")
ax2[1,1].set_title("t4-model")
customLines = [plt.Line2D([0],[0], color = "blue"),
plt.Line2D([0],[0], color = "red"),
plt.Line2D([0],[0], color = "green"),
plt.Line2D([0],[0], color = "purple"),
plt.Line2D([0],[0], color = "cyan"),
plt.Line2D([0],[0], color = "black")]
ax2[0,1].legend(customLines,["First Order Model","Second Order Model",
"Third Order Model","Fourth Order Model",
"Only Second Order","Second and Fourth Order"],
prop = {"size":5})
fig2.suptitle("Renormalization Coefficients (log(a) vs log(N))")
plt.subplots_adjust(right=0.7)
plt.tight_layout()
# calculate best fit lines if plotting didn't occur
else:
fitLines = {"t-model" : np.zeros((1,3)),
"t2-model" : np.zeros((2,3)),
"t3-model" : np.zeros((3,3)),
"t4-model" : np.zeros((4,3)),
"t2-model only" : np.zeros((1,3)),
"t2- and t4-models" : np.zeros((2,3))}
# t-model
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray1[:,0])))
fitLines["t-model"][:] = np.array([slope,np.exp(intercept),r_value])
# second order ROM
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray2[:,0])))
fitLines["t2-model"][0,:] = np.array([slope,np.exp(intercept),r_value])
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray2[:,1])))
fitLines["t2-model"][1,:] = np.array([slope,np.exp(intercept),r_value])
# third order ROM
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray3[:,0])))
fitLines["t3-model"][0,:] = np.array([slope,np.exp(intercept),r_value])
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray3[:,1])))
fitLines["t3-model"][1,:] = np.array([slope,np.exp(intercept),r_value])
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray3[:,2])))
fitLines["t3-model"][2,:] = np.array([slope,np.exp(intercept),r_value])
# fourth order ROM
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,0])))
fitLines["t4-model"][0,:] = np.array([slope,np.exp(intercept),r_value])
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,1])))
fitLines["t4-model"][1,:] = np.array([slope,np.exp(intercept),r_value])
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,2])))
fitLines["t4-model"][2,:] = np.array([slope,np.exp(intercept),r_value])
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray4[:,3])))
fitLines["t4-model"][3,:] = np.array([slope,np.exp(intercept),r_value])
# only t2-model
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray2only[:,0])))
fitLines["t2-model only"][:] = np.array([slope,np.exp(intercept),r_value])
# only t2- and t4-models
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray24only[:,0])))
fitLines["t2- and t4-models"][0,:] = np.array([slope,np.exp(intercept),r_value])
slope,intercept,r_value,p_value,std_err = sp.stats.linregress(np.log(Nlist), np.log(abs(coeffsArray24only[:,1])))
fitLines["t2- and t4-models"][1,:] = np.array([slope,np.exp(intercept),r_value])
return coeffsArray1,coeffsArray2,coeffsArray3,coeffsArray4,coeffsArray2only,coeffsArray24only,fitLines,err
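# Hedged usage sketch for renormalize: the full-simulation size, resolutions,
# intermediate sizes, epsilon, and endtime below are illustrative placeholders
# (not values from any paper). The call generates or loads the cached full
# simulation exactly as documented above. Defined as a function so it does not
# run on import.
def _example_renormalize():
    Nlist = np.array([8, 12, 16])              # ROM resolutions to fit
    Mlist = np.array([12, 18, 24])             # 3/2 * N intermediate sizes
    timesteps = np.linspace(0, 10, 101)
    out = renormalize(fullM=64, endtime=10, Nlist=Nlist, Mlist=Mlist,
                      epsilon=0.1, alpha=1.0, tau=1.0,
                      timesteps=timesteps, IC=np.sin, plots=False)
    coeffs1, coeffs2, coeffs3, coeffs4, coeffs2only, coeffs24only, fitLines, err = out
    return fitLines, err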
def scalingLaws(fullM, endtime, Nlist, Mlist, epsilonList, alpha, tau, timesteps, IC = np.sin, plots = False):
"""
    Finds renormalization coefficients based on simulations with a range of
    epsilon values.
Parameters
----------
fullM : int
Size of full simulation to base fits on
endtime : int
Endtime of full simulation
Nlist : list of ints
List of resolutions for which to find coefficients
Mlist : list of ints
List of intermediary "full" simulations to use for ROMs
epsilonList : list of floats
size of linear term (stiffness)
alpha : float
degree of nonlinearity in KdV
tau : float
time decay modifier
timesteps : Numpy array
specific timesteps for which to save solution
IC : function handle
initial condition of simulation (default np.sin)
plots : boolean
Indicates whether to generate plots (default: False)
Returns
-------
    coeffsArray1 : Numpy array (length(Nlist),1)
Renormalization coefficients for t-model only
coeffsArray2 : Numpy array (length(Nlist),2)
Renormalization coefficients for t-model and t2-model only
coeffsArray3 : Numpy array (length(Nlist),3)
Renormalization coefficients for t1-t3-models
coeffsArray4 : Numpy array (length(Nlist),4)
Renormalization coefficients for t1-t4-models
coeffsArray2only : Numpy array (length(Nlist),1)
Renormalization coefficients for t2-model only
coeffsArray24only : Numpy array (length(Nlist),2)
Renormalization coefficients for t2-model and t4-model only
fitLines : Dict
        Contains scaling law fits for each ROM coefficient
of form c = -b * N^a
Terms given are a, b, and r (correlation coefficient of fit)
"""
# initialize output arrays
c1 = np.zeros((len(Nlist),1,len(epsilonList)))
c2 = np.zeros((len(Nlist),2,len(epsilonList)))
c3 = np.zeros((len(Nlist),3,len(epsilonList)))
c4 = np.zeros((len(Nlist),4,len(epsilonList)))
c2only = np.zeros((len(Nlist),1,len(epsilonList)))
c24only = np.zeros((len(Nlist),2,len(epsilonList)))
# loop through all epsilon values
for i in np.arange(0,len(epsilonList)):
# renormalize for given epsilon value and save results
coeffsArray1,coeffsArray2,coeffsArray3,coeffsArray4,coeffsArray2only,coeffsArray24only,fitLines,err = renormalize(fullM = fullM, endtime = endtime, Nlist = Nlist, Mlist = Mlist, epsilon = epsilonList[i], alpha = alpha, tau = tau, timesteps = timesteps, IC = IC, plots = False)
c1[:,:,i] = coeffsArray1
c2[:,:,i] = coeffsArray2
c3[:,:,i] = coeffsArray3
c4[:,:,i] = coeffsArray4
c2only[:,:,i] = coeffsArray2only
c24only[:,:,i] = coeffsArray24only
# pack results into dictionary for output
coefficients = {"t-model" : c1,
"t2-model" : c2,
"t3-model" : c3,
"t4-model" : c4,
"t2-model only" : c2only,
"t2- and t4-models" : c24only}
# initialize output with best fit scaling laws
fitLines = {"t-model" : np.zeros((1,3)),
"t2-model" : np.zeros((2,3)),
"t3-model" : np.zeros((3,3)),
"t4-model" : np.zeros((4,3)),
"t2-model only" : np.zeros((1,3)),
"t2- and t4-models" : np.zeros((2,3))}
# find the scaling laws for each coefficient
# t-model coefficient
fitLines["t-model"][0,:] = epsilonNscalingLaw(c1[:,0,:],Nlist,epsilonList)
# Second order model coefficients
fitLines["t2-model"][0,:] = epsilonNscalingLaw(c2[:,0,:],Nlist,epsilonList)
fitLines["t2-model"][1,:] = epsilonNscalingLaw(c2[:,1,:],Nlist,epsilonList)
# Third order model coefficients
fitLines["t3-model"][0,:] = epsilonNscalingLaw(c3[:,0,:],Nlist,epsilonList)
fitLines["t3-model"][1,:] = epsilonNscalingLaw(c3[:,1,:],Nlist,epsilonList)
fitLines["t3-model"][2,:] = epsilonNscalingLaw(c3[:,2,:],Nlist,epsilonList)
# Fourth order model coefficients
fitLines["t4-model"][0,:] = epsilonNscalingLaw(c4[:,0,:],Nlist,epsilonList)
fitLines["t4-model"][1,:] = epsilonNscalingLaw(c4[:,1,:],Nlist,epsilonList)
fitLines["t4-model"][2,:] = epsilonNscalingLaw(c4[:,2,:],Nlist,epsilonList)
fitLines["t4-model"][3,:] = epsilonNscalingLaw(c4[:,3,:],Nlist,epsilonList)
# Only t2-model coefficient
fitLines["t2-model only"][0,:] = epsilonNscalingLaw(c2only[:,0,:],Nlist,epsilonList)
# Only t2- and t4-models coefficients
fitLines["t2- and t4-models"][0,:] = epsilonNscalingLaw(c24only[:,0,:],Nlist,epsilonList)
fitLines["t2- and t4-models"][1,:] = epsilonNscalingLaw(c24only[:,1,:],Nlist,epsilonList)
# make plots
fig1,ax1 = plt.subplots(1,2)
fig2,ax2 = plt.subplots(2,2)
fig3,ax3 = plt.subplots(3,2)
fig4,ax4 = plt.subplots(4,2)
fig5,ax5 = plt.subplots(1,2)
fig6,ax6 = plt.subplots(2,2)
# loop through epsilon values
for i in np.arange(len(epsilonList)):
# t-model coefficient
ax1[0].scatter(np.log(Nlist),np.log(-c1[:,0,i]))
# Second order model coefficients
ax2[0,0].scatter(np.log(Nlist),np.log(-c2[:,0,i]))
ax2[1,0].scatter(np.log(Nlist),np.log(-c2[:,1,i]))
# Third order model coefficients
ax3[0,0].scatter(np.log(Nlist),np.log(-c3[:,0,i]))
ax3[1,0].scatter(np.log(Nlist),np.log(-c3[:,1,i]))
ax3[2,0].scatter(np.log(Nlist),np.log(-c3[:,2,i]))
# Fourth order model coefficients
ax4[0,0].scatter(np.log(Nlist),np.log(-c4[:,0,i]))
ax4[1,0].scatter(np.log(Nlist),np.log(-c4[:,1,i]))
ax4[2,0].scatter(np.log(Nlist),np.log(-c4[:,2,i]))
ax4[3,0].scatter(np.log(Nlist),np.log(-c4[:,3,i]))
# Only t2-model
ax5[0].scatter(np.log(Nlist),np.log(-c2only[:,0,i]))
# Only t2- and t4-models
ax6[0,0].scatter(np.log(Nlist),np.log(-c24only[:,0,i]))
ax6[1,0].scatter(np.log(Nlist),np.log(-c24only[:,1,i]))
# plot best fit lines
myEps = epsilonList[i]
myFit = fitLines["t-model"][0,:]
ax1[0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t2-model"][0,:]
ax2[0,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t2-model"][1,:]
ax2[1,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t3-model"][0,:]
ax3[0,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t3-model"][1,:]
ax3[1,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t3-model"][2,:]
ax3[2,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t4-model"][0,:]
ax4[0,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t4-model"][1,:]
ax4[1,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t4-model"][2,:]
ax4[2,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t4-model"][3,:]
ax4[3,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t2-model only"][0,:]
ax5[0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t2- and t4-models"][0,:]
ax6[0,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
myFit = fitLines["t2- and t4-models"][1,:]
ax6[1,0].plot(np.log(Nlist),np.log(-myFit[0])+myFit[1]*np.log(Nlist)+myFit[2]*np.log(myEps))
# loop through epsilon values
for j in np.arange(len(Nlist)):
# t-model coefficient
ax1[1].scatter(np.log(epsilonList),np.log(-c1[j,0,:]))
# Second order model coefficients
ax2[0,1].scatter(np.log(epsilonList),np.log(-c2[j,0,:]))
ax2[1,1].scatter(np.log(epsilonList),np.log(-c2[j,1,:]))
# Third order model coefficients
ax3[0,1].scatter(np.log(epsilonList),np.log(-c3[j,0,:]))
ax3[1,1].scatter(np.log(epsilonList),np.log(-c3[j,1,:]))
ax3[2,1].scatter(np.log(epsilonList),np.log(-c3[j,2,:]))
# Fourth order model coefficients
ax4[0,1].scatter(np.log(epsilonList),np.log(-c4[j,0,:]))
ax4[1,1].scatter(np.log(epsilonList),np.log(-c4[j,1,:]))
ax4[2,1].scatter(np.log(epsilonList),np.log(-c4[j,2,:]))
ax4[3,1].scatter(np.log(epsilonList),np.log(-c4[j,3,:]))
# Only t2-model
ax5[1].scatter(np.log(epsilonList),np.log(-c2only[j,0,:]))
# Only t2- and t4-models
ax6[0,1].scatter(np.log(epsilonList),np.log(-c24only[j,0,:]))
ax6[1,1].scatter(np.log(epsilonList),np.log(-c24only[j,1,:]))
# plot best fit lines
myN = Nlist[j]
myFit = fitLines["t-model"][0,:]
ax1[1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t2-model"][0,:]
ax2[0,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t2-model"][1,:]
ax2[1,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t3-model"][0,:]
ax3[0,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t3-model"][1,:]
ax3[1,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t3-model"][2,:]
ax3[2,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t4-model"][0,:]
ax4[0,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t4-model"][1,:]
ax4[1,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t4-model"][2,:]
ax4[2,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t4-model"][3,:]
ax4[3,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t2-model only"][0,:]
ax5[1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t2- and t4-models"][0,:]
ax6[0,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
myFit = fitLines["t2- and t4-models"][1,:]
ax6[1,1].plot(np.log(epsilonList),np.log(-myFit[0])+myFit[1]*np.log(myN)+myFit[2]*np.log(epsilonList))
# label all plots
fig1.suptitle("t-model")
ax1[0].set_title("log(a1) vs log(N)")
ax1[0].legend(["epsilon = "+str(round(epsilonList[i],2)) for i in range(len(epsilonList))],prop = {"size":5})
ax1[1].set_title("log(a1) vs log(epsilon)")
ax1[1].legend(["N = "+str(Nlist[i]) for i in range(len(Nlist))],prop = {"size":5})
plt.tight_layout()
fig2.suptitle("Second Order Renormalization")
ax2[0,0].set_title("log(a1) vs log(N)")
ax2[1,0].set_title("log(a2) vs log(N)")
ax2[0,0].legend(["epsilon = "+str(round(epsilonList[i],2)) for i in range(len(epsilonList))],prop = {"size":5})
ax2[0,1].set_title("log(a1) vs log(epsilon)")
ax2[1,1].set_title("log(a2) vs log(epsilon)")
ax2[0,1].legend(["N = "+str(Nlist[i]) for i in range(len(Nlist))],prop = {"size":5})
plt.tight_layout()
fig3.suptitle("Third Order Renormalization")
ax3[0,0].set_title("log(a1) vs log(N)")
ax3[1,0].set_title("log(a2) vs log(N)")
ax3[2,0].set_title("log(a3) vs log(N)")
ax3[0,0].legend(["epsilon = "+str(round(epsilonList[i],2)) for i in range(len(epsilonList))],prop = {"size":5})
ax3[0,1].set_title("log(a1) vs log(epsilon)")
ax3[1,1].set_title("log(a2) vs log(epsilon)")
ax3[2,1].set_title("log(a3) vs log(epsilon)")
ax3[0,1].legend(["N = "+str(Nlist[i]) for i in range(len(Nlist))],prop = {"size":5})
plt.tight_layout()
fig4.suptitle("Fourth Order Renormalization")
ax4[0,0].set_title("log(a1) vs log(N)")
ax4[1,0].set_title("log(a2) vs log(N)")
ax4[2,0].set_title("log(a3) vs log(N)")
ax4[3,0].set_title("log(a4) vs log(N)")
ax4[0,0].legend(["epsilon = "+str(round(epsilonList[i],2)) for i in range(len(epsilonList))],prop = {"size":5})
ax4[0,1].set_title("log(a1) vs log(epsilon)")
ax4[1,1].set_title("log(a2) vs log(epsilon)")
ax4[2,1].set_title("log(a3) vs log(epsilon)")
ax4[3,1].set_title("log(a4) vs log(epsilon)")
ax4[0,1].legend(["N = "+str(Nlist[i]) for i in range(len(Nlist))],prop = {"size":5})
plt.tight_layout()
fig5.suptitle("Only t2-Model Renormalization")
ax5[0].set_title("log(a2) vs log(N)")
ax5[0].legend(["epsilon = "+str(round(epsilonList[i],2)) for i in range(len(epsilonList))],prop = {"size":5})
ax5[1].set_title("log(a2) vs log(epsilon)")
ax5[1].legend(["N = "+str(Nlist[i]) for i in range(len(Nlist))],prop = {"size":5})
plt.tight_layout()
fig6.suptitle("Second and Fourth Order Renormalization")
ax6[0,0].set_title("log(a2) vs log(N)")
ax6[1,0].set_title("log(a4) vs log(N)")
ax6[0,0].legend(["epsilon = "+str(round(epsilonList[i],2)) for i in range(len(epsilonList))],prop = {"size":5})
ax6[0,1].set_title("log(a2) vs log(epsilon)")
ax6[1,1].set_title("log(a4) vs log(epsilon)")
ax6[0,1].legend(["N = "+str(Nlist[i]) for i in range(len(Nlist))],prop = {"size":5})
plt.tight_layout()
return coefficients,fitLines
def epsilonNscalingLaw(coeffArray,Nlist,epsilonList):
numEps = len(epsilonList)
numN = len(Nlist)
epsilonTile = np.tile(epsilonList,(numN,1))
Ntile = np.transpose(np.tile(Nlist,(numEps,1)))
LSMatrix = (np.array([[numEps*numN,np.sum(np.log(Ntile)),np.sum(np.log(epsilonTile))],
[np.sum(np.log(Ntile)),np.sum(np.log(Ntile)**2),np.sum(np.log(Ntile)*np.log(epsilonTile))],
[np.sum(np.log(epsilonTile)),np.sum(np.log(Ntile)*np.log(epsilonTile)),np.sum(np.log(epsilonTile)**2)]])
)
LSb = np.array([np.sum(np.log(np.abs(coeffArray))),np.sum(np.log(np.abs(coeffArray))*np.log(Ntile)),np.sum(np.log(np.abs(coeffArray))*np.log(epsilonTile))])
sol = np.linalg.solve(LSMatrix,LSb)
sol[0] = -np.exp(sol[0])
return sol
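# Reading of the fit above (hedged note): epsilonNscalingLaw fits
#   log|c| = log(b) + a*log(N) + g*log(epsilon)
# by ordinary least squares over the whole (N, epsilon) grid and returns [-b, a, g], i.e. each
# coefficient is modeled as c = -b * N**a * epsilon**g. Typical use, as in the loops above:
#   sol = epsilonNscalingLaw(c1[:, 0, :], Nlist, epsilonList)
#   # sol[0] -> -b (negative prefactor), sol[1] -> N exponent, sol[2] -> epsilon exponent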
def findError(compareList,exact,t):
"""
Finds the squared two-norm of the error between a list of ROMs and an exact solution.
Parameters
----------
compareList : List of Numpy arrays of size (N,T)
Set of state vector evolutions to find errors from
exact : Numpy array of size (N,T)
Exact solution for the same timesteps
t : Numpy array (T,)
Timesteps associated with simulations (must all be the same)
Returns
-------
errList : List of Numpy arrays of size (T,)
Arrays with the squared two-norm of the error at each timestep for each ROM
"""
# find the ROM size
N = compareList[0].shape[0]
# generate real space solutions
realSols = [makeRealSpace(x,N) for x in compareList]
exactSol = makeRealSpace(exact,N)
# compute squared two-norm of the error at every timestep
errList = [np.sum((sol - exactSol)**2,0) for sol in realSols]
return errList
def renormalizeRobust(fullM, endtime, Nlist, Mlist, epsilon, alpha, tau, timesteps, IC = np.sin, plots = False):
"""
Finds renormalization coefficients based on a single simulation. If the
simulation doesn't yet exist, it creates it
Parameters
----------
fullM : int
Size of full simulation to base fits on
endtime : int
Endtime of full simulation
Nlist : list of ints
List of resolutions for which to find coefficients
Mlist : list of ints
List of intermediary "full" simulations to use for ROMs
epsilon : float
size of linear term (stiffness)
alpha : float
degree of nonlinearity in KdV
tau : float
time decay modifier
timesteps : Numpy array
specific timesteps for which to save solution
IC : function handle
initial condition of simulation (default np.sin)
plots : boolean
Indicates whether to generate plots (default: False)
Returns
-------
coeffsArray1 : Numpy array (len(Nlist), numSteps-30, 1)
Renormalization coefficients for t-model only
coeffsArray2 : Numpy array (len(Nlist), numSteps-30, 2)
Renormalization coefficients for t-model and t2-model only
coeffsArray3 : Numpy array (len(Nlist), numSteps-30, 3)
Renormalization coefficients for t-model through t3-model
coeffsArray4 : Numpy array (len(Nlist), numSteps-30, 4)
Renormalization coefficients for t-model through t4-model
coeffsArray2only : Numpy array (len(Nlist), numSteps-30, 1)
Renormalization coefficients for t2-model only
coeffsArray24only : Numpy array (len(Nlist), numSteps-30, 2)
Renormalization coefficients for t2-model and t4-model only
(numSteps is the number of saved timesteps; coefficients are refit on growing windows)
"""
# Check if full simulation has already been constructed
# if so, load it, if not, generate it
try:
uFull = np.load("u" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__+".npy")
tFull = np.load("t" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__+".npy")
except:
fullParams = {
'N': fullM,
'M': int(3/2*fullM),
'alpha': 1,
'epsilon': epsilon,
'tau': 1,
'coeffs': None,
'IC': IC,
'endtime': endtime,
'timesteps': timesteps
}
uSimFull = runSim(fullParams)
uFull = uSimFull.y
tFull = uSimFull.t
np.save( "u" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__,uFull)
np.save( "t" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__,tFull)
# recover number of timesteps
numSteps = tFull.shape[0]
# initialize output arrays
coeffsArray1 = np.zeros((Nlist.shape[0],numSteps - 30,1))
coeffsArray2 = np.zeros((Nlist.shape[0],numSteps - 30,2))
coeffsArray3 = np.zeros((Nlist.shape[0],numSteps - 30,3))
coeffsArray4 = np.zeros((Nlist.shape[0],numSteps - 30,4))
coeffsArray2only = np.zeros((Nlist.shape[0],numSteps - 30,1))
coeffsArray24only = np.zeros((Nlist.shape[0],numSteps - 30,2))
# loop through all resolutions
for j in np.arange(0,Nlist.shape[0]):
# Find number of positive terms in ROM, in intermediate calculations, and wavenumber array
N = Nlist[j]
M = Mlist[j]
k = np.concatenate([np.arange(0,M),np.arange(-M,0)])
# Gather first derivative data for fitting purposes
exactEnergy = np.zeros((N,numSteps))
R0Energy = np.zeros((N,numSteps))
R1Energy = np.zeros((N,numSteps))
R2Energy = np.zeros((N,numSteps))
R3Energy = np.zeros((N,numSteps))
R4Energy = np.zeros((N,numSteps))
# plug exact solution into exact RHS and all ROM terms and find energy contribution of each
for i in np.arange(0,numSteps):
# exact RHS
exactRHS,dummyU = markovKdV(uFull[:,i],int(fullM*3/2),alpha)
exactEnergy[:,i] = np.real(exactRHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(exactRHS[0:N])*uFull[0:N,i])
# Markov RHS
nonlin0,u_full = markovKdV(uFull[0:N,i],M,alpha)
R0RHS = nonlin0
R0Energy[:,i] = np.real(R0RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R0RHS[0:N])*uFull[0:N,i])
# First order RHS term
F_modes = np.concatenate([np.arange(0,N),np.arange(2*N-1,M+N+2),np.arange(2*M-N+1,2*M)])
G_modes = np.arange(N,2*M-N+1)
nonlin1,uuStar = tModelKdV(u_full,nonlin0,alpha,F_modes)
R1RHS = nonlin1*tFull[i]**(1-tau)
R1Energy[:,i] = np.real(R1RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R1RHS[0:N])*uFull[0:N,i])
# Second order RHS term
nonlin2,uk3,uu,A,AStar,B,BStar,C,CStar,D,DStar = t2ModelKdV(u_full,nonlin0,uuStar,alpha,F_modes,G_modes,k,epsilon)
R2RHS = nonlin2*tFull[i]**(2*(1-tau))
R2Energy[:,i] = np.real(R2RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R2RHS[0:N])*uFull[0:N,i])
# Third order RHS term
nonlin3,uk6,E,EStar,F,FStar = t3ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,A,AStar,B,BStar,C,CStar,DStar)
R3RHS = nonlin3*tFull[i]**(3*(1-tau))
R3Energy[:,i] = np.real(R3RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R3RHS[0:N])*uFull[0:N,i])
# Fourth order RHS term
nonlin4 = t4ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,uk6,A,AStar,B,BStar,C,CStar,D,DStar,E,EStar,F,FStar)
R4RHS = nonlin4*tFull[i]**(4*(1-tau))
R4Energy[:,i] = np.real(R4RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R4RHS[0:N])*uFull[0:N,i])
##################################################
# Use least-squares fit to identify coefficients #
##################################################
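# The fits below solve, on each window, the least-squares problem
#   min_a || (exactEnergy - R0Energy) - sum_k a_k * R_kEnergy ||^2,
# whose normal equations form the Gram-matrix systems
#   sum_j <R_i, R_j> a_j = <R_i, exactEnergy - R0Energy>,
# where <.,.> is the sum of elementwise products over all modes and timesteps in the window;
# LSMatrix and LSb below assemble exactly these sums.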
for i in np.arange(30,numSteps):
exactEnergySnip = exactEnergy[:,0:i]
R0EnergySnip = R0Energy[:,0:i]
R1EnergySnip = R1Energy[:,0:i]
R2EnergySnip = R2Energy[:,0:i]
R3EnergySnip = R3Energy[:,0:i]
R4EnergySnip = R4Energy[:,0:i]
# t-model coefficient
coeffsArray1[j,i-30,:] = np.sum((exactEnergySnip - R0EnergySnip)*R1EnergySnip)/np.sum(R1EnergySnip*R1EnergySnip)
# t2-model coefficient
LSMatrix = (np.array([[np.sum(R1EnergySnip*R1EnergySnip),np.sum(R1EnergySnip*R2EnergySnip)],
[np.sum(R2EnergySnip*R1EnergySnip),np.sum(R2EnergySnip*R2EnergySnip)]]))
LSb = (np.array([np.sum(R1EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R2EnergySnip*(exactEnergySnip-R0EnergySnip))]))
coeffsArray2[j,i-30,:] = np.linalg.solve(LSMatrix,LSb)
# t3-model coefficient
LSMatrix = (np.array([[np.sum(R1EnergySnip*R1EnergySnip),np.sum(R1EnergySnip*R2EnergySnip),np.sum(R1EnergySnip*R3EnergySnip)],
[np.sum(R2EnergySnip*R1EnergySnip),np.sum(R2EnergySnip*R2EnergySnip),np.sum(R2EnergySnip*R3EnergySnip)],
[np.sum(R3EnergySnip*R1EnergySnip),np.sum(R3EnergySnip*R2EnergySnip),np.sum(R3EnergySnip*R3EnergySnip)]]))
LSb = (np.array([np.sum(R1EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R2EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R3EnergySnip*(exactEnergySnip-R0EnergySnip))]))
coeffsArray3[j,i-30,:] = np.linalg.solve(LSMatrix,LSb)
# t4-model coefficient
LSMatrix = (np.array([[np.sum(R1EnergySnip*R1EnergySnip),np.sum(R1EnergySnip*R2EnergySnip),np.sum(R1EnergySnip*R3EnergySnip),np.sum(R1EnergySnip*R4EnergySnip)],
[np.sum(R2EnergySnip*R1EnergySnip),np.sum(R2EnergySnip*R2EnergySnip),np.sum(R2EnergySnip*R3EnergySnip),np.sum(R2EnergySnip*R4EnergySnip)],
[np.sum(R3EnergySnip*R1EnergySnip),np.sum(R3EnergySnip*R2EnergySnip),np.sum(R3EnergySnip*R3EnergySnip),np.sum(R3EnergySnip*R4EnergySnip)],
[np.sum(R4EnergySnip*R1EnergySnip),np.sum(R4EnergySnip*R2EnergySnip),np.sum(R4EnergySnip*R3EnergySnip),np.sum(R4EnergySnip*R4EnergySnip)]]))
LSb = (np.array([np.sum(R1EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R2EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R3EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R4EnergySnip*(exactEnergySnip-R0EnergySnip))]))
coeffsArray4[j,i-30,:] = np.linalg.solve(LSMatrix,LSb)
# t2-model with *no* t-model
coeffsArray2only[j,i-30,:] = np.sum((exactEnergySnip - R0EnergySnip)*R2EnergySnip)/np.sum(R2EnergySnip*R2EnergySnip)
# t2-model and t4-model with *no* t-model or t3-model
LSMatrix = (np.array([[np.sum(R2EnergySnip*R2EnergySnip),np.sum(R2EnergySnip*R4EnergySnip)],
[np.sum(R4EnergySnip*R2EnergySnip),np.sum(R4EnergySnip*R4EnergySnip)]]))
LSb = (np.array([np.sum(R2EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R4EnergySnip*(exactEnergySnip-R0EnergySnip))]))
coeffsArray24only[j,i-30,:] = np.linalg.solve(LSMatrix,LSb)
for ind in np.arange(Nlist.shape[0]):
fig1,ax1 = plt.subplots(2,2)
fig1.suptitle("N = "+str(Nlist[ind]))
ax1[0,0].plot(timesteps[30:],coeffsArray1[ind,:,0],color = "blue")
ax1[0,0].plot(timesteps[30:],coeffsArray2[ind,:,0],color = "red")
ax1[0,0].plot(timesteps[30:],coeffsArray3[ind,:,0],color = "green")
ax1[0,0].plot(timesteps[30:],coeffsArray4[ind,:,0],color = "black")
ax1[0,0].set_title("t-model")
ax1[0,1].plot([],[],color = "blue")
ax1[0,1].plot(timesteps[30:],coeffsArray2[ind,:,1], color = "red")
ax1[0,1].plot(timesteps[30:],coeffsArray3[ind,:,1], color = "green")
ax1[0,1].plot(timesteps[30:],coeffsArray4[ind,:,1], color = "black")
ax1[0,1].plot(timesteps[30:],coeffsArray2only[ind,:,0],color = "cyan")
ax1[0,1].plot(timesteps[30:],coeffsArray24only[ind,:,0], color = "magenta")
ax1[0,1].set_title("t2-model")
ax1[0,1].legend(["First order","Second order","Third order","Fourth order","Only t2","t2 and t4"],prop = {"size":5})
ax1[1,0].plot(timesteps[30:],coeffsArray3[ind,:,2], color = "green")
ax1[1,0].plot(timesteps[30:],coeffsArray4[ind,:,2],color = "black")
ax1[1,0].set_title("t3-model")
ax1[1,1].plot(timesteps[30:],coeffsArray4[ind,:,3], color = "black")
ax1[1,1].plot(timesteps[30:],coeffsArray24only[ind,:,1], color = "magenta")
ax1[1,1].set_title("t4-model")
plt.tight_layout()
return coeffsArray1,coeffsArray2,coeffsArray3,coeffsArray4,coeffsArray2only,coeffsArray24only
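# Note on the outputs above: each coeffsArray has shape (len(Nlist), numSteps - 30, order).
# Row j holds, for resolution Nlist[j], the coefficients refit on every growing window
# [t_0, t_i] once at least 30 samples are available, so the final slice, e.g.
# coeffsArray4[j, -1, :], is the fit over (essentially) the full simulation interval.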
def renormalizeWindow(fullM, endtime, width, Nlist, Mlist, epsilon, alpha, tau, timesteps, IC = np.sin, plots = False):
"""
Finds renormalization coefficients using sliding window least squares.
Parameters
----------
fullM : int
Size of full simulation to base fits on
endtime : int
Endtime of full simulation
width : float
Size of sliding window to use in fitting
Nlist : list of ints
List of resolutions for which to find coefficients
Mlist : list of ints
List of intermediary "full" simulations to use for ROMs
epsilon : float
size of linear term (stiffness)
alpha : float
degree of nonlinearity in KdV
tau : float
time decay modifier
timesteps : Numpy array
specific timesteps for which to save solution
IC : function handle
initial condition of simulation (default np.sin)
plots : boolean
Indicates whether to generate plots (default: False)
Returns
-------
coeffsArray1 : Numpy array (len(Nlist), numSteps-widthSteps+1, 1)
Renormalization coefficients for t-model only
coeffsArray2 : Numpy array (len(Nlist), numSteps-widthSteps+1, 2)
Renormalization coefficients for t-model and t2-model only
coeffsArray3 : Numpy array (len(Nlist), numSteps-widthSteps+1, 3)
Renormalization coefficients for t-model through t3-model
coeffsArray4 : Numpy array (len(Nlist), numSteps-widthSteps+1, 4)
Renormalization coefficients for t-model through t4-model
coeffsArray2only : Numpy array (len(Nlist), numSteps-widthSteps+1, 1)
Renormalization coefficients for t2-model only
coeffsArray24only : Numpy array (len(Nlist), numSteps-widthSteps+1, 2)
Renormalization coefficients for t2-model and t4-model only
(widthSteps is the window width converted to a number of timesteps; one fit per window position)
"""
# Check if full simulation has already been constructed
# if so, load it, if not, generate it
try:
uFull = np.load("u" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__+".npy")
tFull = np.load("t" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__+".npy")
except:
fullParams = {
'N': fullM,
'M': int(3/2*fullM),
'alpha': 1,
'epsilon': epsilon,
'tau': 1,
'coeffs': None,
'IC': IC,
'endtime': endtime,
'timesteps': timesteps
}
uSimFull = runSim(fullParams)
uFull = uSimFull.y
tFull = uSimFull.t
np.save( "u" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__,uFull)
np.save( "t" + str(fullM) + "t" + str(endtime)+"e"+str(round(epsilon,2)).replace('.','p')+IC.__name__,tFull)
# recover number of timesteps
numSteps = tFull.shape[0]
widthSteps = round(width/(tFull[1]-tFull[0]))
# initialize output arrays
coeffsArray1 = np.zeros((Nlist.shape[0],numSteps - widthSteps+1,1))
coeffsArray2 = np.zeros((Nlist.shape[0],numSteps - widthSteps+1,2))
coeffsArray3 = np.zeros((Nlist.shape[0],numSteps - widthSteps+1,3))
coeffsArray4 = np.zeros((Nlist.shape[0],numSteps - widthSteps+1,4))
coeffsArray2only = np.zeros((Nlist.shape[0],numSteps - widthSteps+1,1))
coeffsArray24only = np.zeros((Nlist.shape[0],numSteps - widthSteps+1,2))
exact1 = np.zeros((Nlist.shape[0],1))
exact2 = np.zeros((Nlist.shape[0],2))
exact3 = np.zeros((Nlist.shape[0],3))
exact4 = np.zeros((Nlist.shape[0],4))
exact2o = np.zeros((Nlist.shape[0],1))
exact24o = np.zeros((Nlist.shape[0],2))
# loop through all resolutions
for j in np.arange(0,Nlist.shape[0]):
# Find number of positive terms in ROM, in intermediate calculations, and wavenumber array
N = Nlist[j]
M = Mlist[j]
k = np.concatenate([np.arange(0,M),np.arange(-M,0)])
# Gather first derivative data for fitting purposes
exactEnergy = np.zeros((N,numSteps))
R0Energy = np.zeros((N,numSteps))
R1Energy = np.zeros((N,numSteps))
R2Energy = np.zeros((N,numSteps))
R3Energy = np.zeros((N,numSteps))
R4Energy = np.zeros((N,numSteps))
# plug exact solution into exact RHS and all ROM terms and find energy contribution of each
for i in np.arange(0,numSteps):
# exact RHS
exactRHS,dummyU = markovKdV(uFull[:,i],int(fullM*3/2),alpha)
exactEnergy[:,i] = np.real(exactRHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(exactRHS[0:N])*uFull[0:N,i])
# Markov RHS
nonlin0,u_full = markovKdV(uFull[0:N,i],M,alpha)
R0RHS = nonlin0
R0Energy[:,i] = np.real(R0RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R0RHS[0:N])*uFull[0:N,i])
# First order RHS term
F_modes = np.concatenate([np.arange(0,N),np.arange(2*N-1,M+N+2),np.arange(2*M-N+1,2*M)])
G_modes = np.arange(N,2*M-N+1)
nonlin1,uuStar = tModelKdV(u_full,nonlin0,alpha,F_modes)
if tFull[i] == 0:
R1RHS = nonlin1*0
else:
R1RHS = nonlin1*tFull[i]**(1-tau)
R1Energy[:,i] = np.real(R1RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R1RHS[0:N])*uFull[0:N,i])
# Second order RHS term
nonlin2,uk3,uu,A,AStar,B,BStar,C,CStar,D,DStar = t2ModelKdV(u_full,nonlin0,uuStar,alpha,F_modes,G_modes,k,epsilon)
R2RHS = nonlin2*tFull[i]**(2*(1-tau))
R2Energy[:,i] = np.real(R2RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R2RHS[0:N])*uFull[0:N,i])
# Third order RHS term
nonlin3,uk6,E,EStar,F,FStar = t3ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,A,AStar,B,BStar,C,CStar,DStar)
R3RHS = nonlin3*tFull[i]**(3*(1-tau))
R3Energy[:,i] = np.real(R3RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R3RHS[0:N])*uFull[0:N,i])
# Fourth order RHS term
nonlin4 = t4ModelKdV(alpha,F_modes,G_modes,k,epsilon,u_full,uu,uuStar,uk3,uk6,A,AStar,B,BStar,C,CStar,D,DStar,E,EStar,F,FStar)
R4RHS = nonlin4*tFull[i]**(4*(1-tau))
R4Energy[:,i] = np.real(R4RHS[0:N]*np.conj(uFull[0:N,i]) + np.conj(R4RHS[0:N])*uFull[0:N,i])
exact1[j,:] = np.sum((exactEnergy - R0Energy)*R1Energy)/np.sum(R1Energy*R1Energy)
LSMatrix = (np.array([[np.sum(R1Energy*R1Energy),np.sum(R1Energy*R2Energy)],
[np.sum(R2Energy*R1Energy),np.sum(R2Energy*R2Energy)]]))
LSb = (np.array([np.sum(R1Energy*(exactEnergy-R0Energy)),np.sum(R2Energy*(exactEnergy-R0Energy))]))
exact2[j,:] = np.linalg.solve(LSMatrix,LSb)
LSMatrix = (np.array([[np.sum(R1Energy*R1Energy),np.sum(R1Energy*R2Energy),np.sum(R1Energy*R3Energy)],
[np.sum(R2Energy*R1Energy),np.sum(R2Energy*R2Energy),np.sum(R2Energy*R3Energy)],
[np.sum(R3Energy*R1Energy),np.sum(R3Energy*R2Energy),np.sum(R3Energy*R3Energy)]]))
LSb = (np.array([np.sum(R1Energy*(exactEnergy-R0Energy)),np.sum(R2Energy*(exactEnergy-R0Energy)),np.sum(R3Energy*(exactEnergy-R0Energy))]))
exact3[j,:] = np.linalg.solve(LSMatrix,LSb)
LSMatrix = (np.array([[np.sum(R1Energy*R1Energy),np.sum(R1Energy*R2Energy),np.sum(R1Energy*R3Energy),np.sum(R1Energy*R4Energy)],
[np.sum(R2Energy*R1Energy),np.sum(R2Energy*R2Energy),np.sum(R2Energy*R3Energy),np.sum(R2Energy*R4Energy)],
[np.sum(R3Energy*R1Energy),np.sum(R3Energy*R2Energy),np.sum(R3Energy*R3Energy),np.sum(R3Energy*R4Energy)],
[np.sum(R4Energy*R1Energy),np.sum(R4Energy*R2Energy),np.sum(R4Energy*R3Energy),np.sum(R4Energy*R4Energy)]]))
LSb = (np.array([np.sum(R1Energy*(exactEnergy-R0Energy)),np.sum(R2Energy*(exactEnergy-R0Energy)),np.sum(R3Energy*(exactEnergy-R0Energy)),np.sum(R4Energy*(exactEnergy-R0Energy))]))
exact4[j,:] = np.linalg.solve(LSMatrix,LSb)
exact2o[j,:] = np.sum((exactEnergy - R0Energy)*R2Energy)/np.sum(R2Energy*R2Energy)
LSMatrix = (np.array([[np.sum(R2Energy*R2Energy),np.sum(R2Energy*R4Energy)],
[np.sum(R4Energy*R2Energy),np.sum(R4Energy*R4Energy)]]))
LSb = (np.array([np.sum(R2Energy*(exactEnergy-R0Energy)),np.sum(R4Energy*(exactEnergy-R0Energy))]))
exact24o[j,:] = np.linalg.solve(LSMatrix,LSb)
##################################################
# Use least-squares fit to identify coefficients #
##################################################
for i in np.arange(0,numSteps-widthSteps+1):
exactEnergySnip = exactEnergy[:,i:i+widthSteps]
R0EnergySnip = R0Energy[:,i:i+widthSteps]
R1EnergySnip = R1Energy[:,i:i+widthSteps]
R2EnergySnip = R2Energy[:,i:i+widthSteps]
R3EnergySnip = R3Energy[:,i:i+widthSteps]
R4EnergySnip = R4Energy[:,i:i+widthSteps]
# t-model coefficient
coeffsArray1[j,i,:] = np.sum((exactEnergySnip - R0EnergySnip)*R1EnergySnip)/np.sum(R1EnergySnip*R1EnergySnip)
# t2-model coefficient
LSMatrix = (np.array([[np.sum(R1EnergySnip*R1EnergySnip),np.sum(R1EnergySnip*R2EnergySnip)],
[np.sum(R2EnergySnip*R1EnergySnip),np.sum(R2EnergySnip*R2EnergySnip)]]))
LSb = (np.array([np.sum(R1EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R2EnergySnip*(exactEnergySnip-R0EnergySnip))]))
coeffsArray2[j,i,:] = np.linalg.solve(LSMatrix,LSb)
# t3-model coefficient
LSMatrix = (np.array([[np.sum(R1EnergySnip*R1EnergySnip),np.sum(R1EnergySnip*R2EnergySnip),np.sum(R1EnergySnip*R3EnergySnip)],
[np.sum(R2EnergySnip*R1EnergySnip),np.sum(R2EnergySnip*R2EnergySnip),np.sum(R2EnergySnip*R3EnergySnip)],
[np.sum(R3EnergySnip*R1EnergySnip),np.sum(R3EnergySnip*R2EnergySnip),np.sum(R3EnergySnip*R3EnergySnip)]]))
LSb = (np.array([np.sum(R1EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R2EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R3EnergySnip*(exactEnergySnip-R0EnergySnip))]))
coeffsArray3[j,i,:] = np.linalg.solve(LSMatrix,LSb)
# t4-model coefficient
LSMatrix = (np.array([[np.sum(R1EnergySnip*R1EnergySnip),np.sum(R1EnergySnip*R2EnergySnip),np.sum(R1EnergySnip*R3EnergySnip),np.sum(R1EnergySnip*R4EnergySnip)],
[np.sum(R2EnergySnip*R1EnergySnip),np.sum(R2EnergySnip*R2EnergySnip),np.sum(R2EnergySnip*R3EnergySnip),np.sum(R2EnergySnip*R4EnergySnip)],
[np.sum(R3EnergySnip*R1EnergySnip),np.sum(R3EnergySnip*R2EnergySnip),np.sum(R3EnergySnip*R3EnergySnip),np.sum(R3EnergySnip*R4EnergySnip)],
[np.sum(R4EnergySnip*R1EnergySnip),np.sum(R4EnergySnip*R2EnergySnip),np.sum(R4EnergySnip*R3EnergySnip),np.sum(R4EnergySnip*R4EnergySnip)]]))
LSb = (np.array([np.sum(R1EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R2EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R3EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R4EnergySnip*(exactEnergySnip-R0EnergySnip))]))
coeffsArray4[j,i,:] = np.linalg.solve(LSMatrix,LSb)
# t2-model with *no* t-model
coeffsArray2only[j,i,:] = np.sum((exactEnergySnip - R0EnergySnip)*R2EnergySnip)/np.sum(R2EnergySnip*R2EnergySnip)
# t2-model and t4-model with *no* t-model or t3-model
LSMatrix = (np.array([[np.sum(R2EnergySnip*R2EnergySnip),np.sum(R2EnergySnip*R4EnergySnip)],
[np.sum(R4EnergySnip*R2EnergySnip),np.sum(R4EnergySnip*R4EnergySnip)]]))
LSb = (np.array([np.sum(R2EnergySnip*(exactEnergySnip-R0EnergySnip)),np.sum(R4EnergySnip*(exactEnergySnip-R0EnergySnip))]))
coeffsArray24only[j,i,:] = np.linalg.solve(LSMatrix,LSb)
for ind in np.arange(Nlist.shape[0]):
fig1,ax1 = plt.subplots(2,2)
fig1.suptitle("N = "+str(Nlist[ind]))
ax1[0,0].plot(timesteps[0:numSteps-widthSteps+1],coeffsArray1[ind,:,0],color = "blue")
ax1[0,0].plot(timesteps[0:numSteps-widthSteps+1],coeffsArray2[ind,:,0],color = "red")
ax1[0,0].plot(timesteps[0:numSteps-widthSteps+1],coeffsArray3[ind,:,0],color = "green")
ax1[0,0].plot(timesteps[0:numSteps-widthSteps+1],coeffsArray4[ind,:,0],color = "black")
ax1[0,0].plot([timesteps[0],timesteps[numSteps-widthSteps+1]],[exact1[ind,0],exact1[ind,0]],color="blue",linestyle=":")
ax1[0,0].plot([timesteps[0],timesteps[numSteps-widthSteps+1]],[exact2[ind,0],exact2[ind,0]],color="red",linestyle=":")
ax1[0,0].plot([timesteps[0],timesteps[numSteps-widthSteps+1]],[exact3[ind,0],exact3[ind,0]],color="green",linestyle=":")
ax1[0,0].plot([timesteps[0],timesteps[numSteps-widthSteps+1]],[exact4[ind,0],exact4[ind,0]],color="black",linestyle=":")
ax1[0,0].set_title("t-model")
ax1[0,1].plot([],[],color = "blue")
ax1[0,1].plot(timesteps[0:numSteps-widthSteps+1],coeffsArray2[ind,:,1], color = "red")
ax1[0,1].plot(timesteps[0:numSteps-widthSteps+1],coeffsArray3[ind,:,1], color = "green")
ax1[0,1].plot(timesteps[0:numSteps-widthSteps+1],coeffsArray4[ind,:,1], color = "black")
ax1[0,1].plot(timesteps[0:numSteps-widthSteps+1],coeffsArray2only[ind,:,0],color = "cyan")
ax1[0,1].plot(timesteps[0:numSteps-widthSteps+1],coeffsArray24only[ind,:,0], color = "magenta")
ax1[0,1].set_title("t2-model")
ax1[0,1].legend(["First order","Second order","Third order","Fourth order","Only t2","t2 and t4"],prop = {"size":5})
ax1[0,1].plot([timesteps[0],timesteps[numSteps-widthSteps+1]],[exact2[ind,1],exact2[ind,1]],color="red",linestyle=":")
ax1[0,1].plot([timesteps[0],timesteps[numSteps-widthSteps+1]],[exact3[ind,1],exact3[ind,1]],color="green",linestyle=":")
ax1[0,1].plot([timesteps[0],timesteps[numSteps-widthSteps+1]],[exact4[ind,1],exact4[ind,1]],color="black",linestyle=":")
ax1[0,1].plot([timesteps[0],timesteps[numSteps-widthSteps+1]],[exact2o[ind,0],exact2o[ind,0]],color="cyan",linestyle=":")
ax1[0,1].plot([timesteps[0],timesteps[numSteps-widthSteps+1]],[exact24o[ind,0],exact24o[ind,0]],color="magenta",linestyle=":")
ax1[1,0].plot(timesteps[0:numSteps-widthSteps+1],coeffsArray3[ind,:,2], color = "green")
ax1[1,0].plot(timesteps[0:numSteps-widthSteps+1],coeffsArray4[ind,:,2],color = "black")
ax1[1,0].set_title("t3-model")
ax1[1,0].plot([timesteps[0],timesteps[numSteps-widthSteps+1]],[exact3[ind,2],exact3[ind,2]],color="green",linestyle=":")
ax1[1,0].plot([timesteps[0],timesteps[numSteps-widthSteps+1]],[exact4[ind,2],exact4[ind,2]],color="black",linestyle=":")
ax1[1,1].plot(timesteps[0:numSteps-widthSteps+1],coeffsArray4[ind,:,3], color = "black")
ax1[1,1].plot(timesteps[0:numSteps-widthSteps+1],coeffsArray24only[ind,:,1], color = "magenta")
ax1[1,1].set_title("t4-model")
ax1[1,1].plot([timesteps[0],timesteps[numSteps-widthSteps+1]],[exact4[ind,3],exact4[ind,3]],color="black",linestyle=":")
ax1[1,1].plot([timesteps[0],timesteps[numSteps-widthSteps+1]],[exact24o[ind,1],exact24o[ind,1]],color="magenta",linestyle=":")
plt.tight_layout()
return coeffsArray1,coeffsArray2,coeffsArray3,coeffsArray4,coeffsArray2only,coeffsArray24only
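# Note on the sliding window above: width (in time units) is converted to a sample count via
# widthSteps = round(width / dt), so, purely as an illustration, width = 1.0 with dt = 0.01
# refits the coefficients on a 100-sample window starting at every timestep; the dotted lines
# in the plots mark the whole-interval fits exact1 ... exact24o for comparison.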
def renormalizeTau(fullM, endtime, Nlist, Mlist, epsilon, alpha, tauList, timesteps, IC = np.sin):
"""
Tests a range of tau values for fitting coefficients.
Parameters
----------
fullM : int
Resolution of full model to use for fitting
endtime : float
Final time to use for fitting
Nlist : list of ints
List of resolutions for which to find coefficients
Mlist : list of ints
List of intermediary "full" simulations to use for ROMs
epsilon : float
size of linear term (stiffness)
alpha : float
degree of nonlinearity in KdV
timesteps : Numpy array
specific timesteps for which to save solution
tauList : Numpy array
Grid of tau values to test
IC : function handle
initial condition of simulation (default np.sin)
Returns
-------
out : dict
Contains optimal coefficients for each model for each value of tau.
't-model' is len(tauList) x len(Nlist) x 1,
't2-model' is len(tauList) x len(Nlist) x 2, etc.
errList : list
List of the error outputs returned by renormalize for each tau value
"""
out = {"t-model" : np.zeros((tauList.shape[0],Nlist.shape[0],1)),
"t2-model" : np.zeros((tauList.shape[0],Nlist.shape[0],2)),
"t3-model" : np.zeros((tauList.shape[0],Nlist.shape[0],3)),
"t4-model" : np.zeros((tauList.shape[0],Nlist.shape[0],4)),
"t2-model only" : np.zeros((tauList.shape[0],Nlist.shape[0],1)),
"t2- and t4-models" : np.zeros((tauList.shape[0],Nlist.shape[0],2))
}
errList = []
for i in np.arange(tauList.shape[0]):
result = renormalize(fullM, endtime, Nlist, Mlist, epsilon, alpha, tauList[i], timesteps, IC = IC, plots = False)
out["t-model"][i,:,:] = result[0]
out["t2-model"][i,:,:] = result[1]
out["t3-model"][i,:,:] = result[2]
out["t4-model"][i,:,:] = result[3]
out["t2-model only"][i,:,:] = result[4]
out["t2- and t4-models"][i,:,:] = result[5]
errList.append(result[7])
return out,errList
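# Illustrative call (argument values here are assumptions for the sketch, not taken from the
# original experiments):
#   tauList = np.arange(0, 1.05, 0.05)
#   out, errList = renormalizeTau(fullM=128, endtime=10, Nlist=np.array([8, 12, 16]),
#                                 Mlist=np.array([24, 36, 48]), epsilon=0.1, alpha=1,
#                                 tauList=tauList, timesteps=np.linspace(0, 10, 1001))
#   # out["t2-model"][i, j, :] holds the two fitted coefficients for tauList[i] and Nlist[j]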
def automatedROM(N,alpha,epsilon,timesteps,fitTime = 10,tauTests = np.arange(0,1.05,0.05),IC = np.sin,tol = 1e-3):
"""
Automatically finds optimal tau and coefficients for an ROM and runs the ROM.
Also produces reference exact solution
Parameters
----------
N : int
Resolution of ROM
alpha : float
degree of nonlinearity in KdV
epsilon : float
size of linear term (stiffness)
timesteps : Numpy array
specific timesteps for which to save solution
fitTime : float
Fits are made over window of exact solution from 0 to fitTime (default 10)
tauTests : Numpy array
Grid of tau values to test (default 0:0.05:1)
IC : function handle
initial condition of simulation (default np.sin)
tol : float
Tolerance for declaring full model "resolved"
There must be less than this relative error in the first half of the
full modes up to the end time (default 10^-3)
Returns
-------
simMarkov : SciPy integration object
Simulation up to end time of Markov model
sim2 : SciPy integration object
Simulation up to end time of 2nd order model model
sim4 : SciPy integration object
Simulation up to end time of fourth order model
coefficients :
errors
"""
endtime = timesteps[-1]
M = 16
unresolved = True
#print("Constructing reference exact solution...")
try:
fileList = glob.glob("u" + '[0-9]*' + "t" + str(int(endtime))+"e"+str(round(epsilon,2)).replace('.','p')+".npy")
myFile = fileList[0]
uFull =
|
np.load(myFile)
|
numpy.load
|
# This code was developed by <NAME>, 2021. <EMAIL>
import pandas as pd
import numpy as np
import copy
from simple_dispatch import StorageModel
from simple_dispatch import generatorData
from simple_dispatch import bidStack
from simple_dispatch import dispatch
from simple_dispatch import generatorDataShort
import scipy
class FutureGrid(object):
"""By <NAME>. This class manages the model of the future grid and implements dispatch / capacity calculations.
:param gd_short: The generator model
:type gd_short: An object of class `generatorDataShort` from `simple_dispatch.py`
:param unit_drops: Information about which generators are retired in each year
:type unit_drops: Dataframe
:param additions_df: Information about which generators are added each year
:type additions_df: Dataframe
:param year: Year for the future grid
:type year: int
:param future: Future grid demand, including EV demand
:type future: An object of class `FutureDemand` from later in this file
:param stor_df: Demand that needs to be met by storage; passed to storage model object
:type stor_df: Dataframe
:param storage: Storage model
:type storage: An object of the class `StorageModel` from `simple_dispatch.py`
:param bs: Bidstack
:type bs: An object of the class `bidStack` by Thomas Deetjen from `simple_dispatch.py`
:param dp: Dispatch
:type dp: An object of the class `dispatch` by Thomas Deetjen from `simple_dispatch.py`
"""
def __init__(self, gd_short):
self.gd_short = gd_short
self.gd_short_original = copy.deepcopy(gd_short)
self.unit_drops = pd.read_csv('IntermediateOutputs/scheduled_retirements_2019.csv', index_col=0)
self.additions_df = pd.read_csv('IntermediateOutputs/generator_additions.csv', index_col=0)
self.year = None
self.future = None
self.stor_df = None
self.storage = None
self.bs = None
self.dp = None
def add_generators(self, future_year):
"""Duplicate generators to simulate new additions in the future WECC grid."""
gd_short_final = copy.deepcopy(self.gd_short)
added_units = self.additions_df[self.additions_df['Year']<future_year]['orispl_unit'].values
for i, val in enumerate(added_units):
idx = len(gd_short_final.df)
loc1 = gd_short_final.df[gd_short_final.df['orispl_unit']==val].index
gd_short_final.df = pd.concat((gd_short_final.df, gd_short_final.df.loc[loc1]), ignore_index=True)
gd_short_final.df.loc[idx, 'orispl_unit'] = 'added_'+str(i)
self.gd_short = copy.deepcopy(gd_short_final)
def add_generators_sensitivity(self, fuel_col='is_gas', percent_increase_of_total_ffcap=0.2):
# Duplicate existing plants, youngest and cheapest, to add 20% (or other) of existing fossil fuel capacity
captotal = self.gd_short.df.loc[(self.gd_short.df['nerc']=='WECC')&(self.gd_short.df[fuel_col]==1)].mw.sum()
uptoind = np.where(np.cumsum(self.gd_short.df.loc[(self.gd_short.df['nerc']=='WECC')&(self.gd_short.df[fuel_col]==1)].sort_values(by=['year_online', 'vom'], ascending=[False, True]).loc[:, ['mw', 'year_online', 'vom']]['mw']) > percent_increase_of_total_ffcap*(captotal))[0][0]
new_additions = pd.DataFrame({'orispl_unit':self.gd_short.df.loc[self.gd_short.df.loc[(self.gd_short.df['nerc']=='WECC')&(self.gd_short.df[fuel_col]==1)].sort_values(by=['year_online', 'vom'], ascending=[False, True]).index.values[np.arange(0, uptoind)], 'orispl_unit'].values})
new_additions['Year'] = 2022
gd_short_final = copy.deepcopy(self.gd_short)
# added_units = self.additions_df[self.additions_df['Year']<future_year]['orispl_unit'].values
for i, val in enumerate(new_additions['orispl_unit'].values):
idx = len(gd_short_final.df)
loc1 = gd_short_final.df[gd_short_final.df['orispl_unit']==val].index
gd_short_final.df = pd.concat((gd_short_final.df, gd_short_final.df.loc[loc1]), ignore_index=True)
gd_short_final.df.loc[idx, 'orispl_unit'] = 'added_'+str(i)
self.gd_short = copy.deepcopy(gd_short_final)
def drop_generators_sensitivity(self, fuel_col='is_gas', percent_decrease_of_total_ffcap=0.2):
# Drop existing plants, oldest and most expensive, to drop 20% (or other) of existing fossil fuel capacity
captotal = self.gd_short.df.loc[(self.gd_short.df['nerc']=='WECC')&(self.gd_short.df[fuel_col]==1)].mw.sum()
uptoind = np.where(np.cumsum(self.gd_short.df.loc[(self.gd_short.df['nerc']=='WECC')&(self.gd_short.df[fuel_col]==1)].sort_values(by=['year_online', 'vom'], ascending=[True, False]).loc[:, ['mw', 'year_online', 'vom']]['mw']) > percent_decrease_of_total_ffcap*(captotal))[0][0]
new_drops = pd.DataFrame({'orispl_unit':self.gd_short.df.loc[self.gd_short.df.loc[(self.gd_short.df['nerc']=='WECC')&(self.gd_short.df[fuel_col]==1)].sort_values(by=['year_online', 'vom'], ascending=[True, False]).index.values[np.arange(0, uptoind)], 'orispl_unit'].values})
new_drops['Year'] = 2022
gd_short_final = copy.deepcopy(self.gd_short)
# dropped_units = new_drops[new_drops['retirement_year']<future_year]['orispl_unit'].values
gd_short_final.df = gd_short_final.df[~gd_short_final.df['orispl_unit'].isin(new_drops['orispl_unit'].values)].copy(deep=True).reset_index(drop=True)
self.gd_short = copy.deepcopy(gd_short_final)
def drop_generators(self, future_year):
"""Drop generators to match announced retirements in the WECC grid."""
gd_short_final = copy.deepcopy(self.gd_short)
dropped_units = self.unit_drops[self.unit_drops['retirement_year']<future_year]['orispl_unit'].values
gd_short_final.df = gd_short_final.df[~gd_short_final.df['orispl_unit'].isin(dropped_units)].copy(deep=True).reset_index(drop=True)
self.gd_short = copy.deepcopy(gd_short_final)
def change_gas_prices(self, fuel):
"""Change fuel prices for gas generators to test sensitivity."""
gd_short_final = copy.deepcopy(self.gd_short)
inds = gd_short_final.df[gd_short_final.df['fuel'].isin(['ng', 'og'])].index
gd_short_final.df.loc[inds, ['fuel_price'+str(i) for i in np.arange(1, 53)]] = fuel*gd_short_final.df.loc[inds, ['fuel_price'+str(i) for i in np.arange(1, 53)]]
self.gd_short = copy.deepcopy(gd_short_final)
def set_up_scenario(self, year=2030, solar=2.5, wind=2.5, fuel=1.0, ev_pen=1.0,
ev_scenario='High Home', ev_timers='', ev_workplace_control='',
ev_workplace_bool=False, evs_bool=True, ev_scenario_date='20211119',
weekend_timers='', weekend_date='20211119', ev_folder=None, generator_sensitivity=False, fuel_col='is_gas', generator_sensitivity_type='add', percent_increase_of_total_ffcap=0.2, percent_decrease_of_total_ffcap=0.2):
"""Set up scenario of future demand."""
# drop and add generators
self.year = year
if year != 2019:
self.add_generators(year)
self.drop_generators(year)
if generator_sensitivity:
if generator_sensitivity_type=='add':
self.add_generators_sensitivity(fuel_col=fuel_col, percent_increase_of_total_ffcap=percent_increase_of_total_ffcap)
else:
self.drop_generators_sensitivity(fuel_col=fuel_col, percent_decrease_of_total_ffcap=percent_decrease_of_total_ffcap)
# change fuel prices
if fuel != 1.0:
self.change_gas_prices(fuel)
# model future demand
self.future = FutureDemand(self.gd_short, year=year)
if year != 2019:
self.future.electrification(scale_vs_given=True) # electrification in other sectors
# adjust renewables levels
self.future.solar_multiplier[year] = solar
self.future.wind_multiplier[year] = wind
self.future.solar()
self.future.wind()
# add EVs
if evs_bool:
if ev_workplace_bool:
self.future.evs(pen_level=ev_pen, scenario_name=ev_scenario, timers_extra_info=ev_timers, wp_control=ev_workplace_control, scenario_date=ev_scenario_date, timers_extra_info_weekends=weekend_timers, weekend_date=weekend_date, folder=ev_folder)
else:
self.future.evs(pen_level=ev_pen, scenario_name=ev_scenario, timers_extra_info=ev_timers, scenario_date=ev_scenario_date, timers_extra_info_weekends=weekend_timers, weekend_date=weekend_date, folder=ev_folder)
# update
self.future.update_total()
def check_overgeneration(self, save_str=None, extra_save_str='', change_demand=True):
"""Check for negative demand. Clip and save overgeneration amount."""
if self.future.demand['demand'].min() < 0:
if save_str is not None:
self.future.demand.loc[self.future.demand['demand'] < 0].to_csv(save_str+'_overgeneration'+extra_save_str+'.csv', index=None)
if change_demand:
self.future.demand['demand'] = self.future.demand['demand'].clip(0, 1e10)
def run_storage_before_capacitydispatch(self, cap, max_rate, allow_negative=False):
"""If running storage on net demand before dispatch, do that here."""
self.stor_df = pd.DataFrame({'datetime': pd.to_datetime(self.future.demand['datetime'].values),
'total_demand': self.future.demand['demand'].values})
self.storage = StorageModel(self.stor_df)
self.storage.calculate_operation_beforecapacity(cap, max_rate, allow_negative=allow_negative)
def run_dispatch(self, max_penlevel, save_str, result_date='20220330', return_generator_limits=False, thermal_storage=False, force_storage=False):
"""Run the dispatch. max_penlevel indicates whether storage will be needed or whether the model will break
without it, but the try except clause will ensure the simulation is run if that is incorrect."""
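# Control flow, as read from the code below: a plain dispatch is attempted first when the EV
# penetration is at or below max_penlevel (and force_storage is False); if that attempt fails,
# or the penetration exceeds max_penlevel, the dispatch is rerun with include_storage=True and
# a StorageModel is sized from the resulting storage_df.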
self.bs = bidStack(self.gd_short, co2_dol_per_kg=0, time=1, dropNucHydroGeo=True, include_min_output=False, mdt_weight=0.5, include_easiur=False)
self.dp = dispatch(self.bs, self.future.demand, time_array=np.arange(52)+1, return_generator_limits=return_generator_limits)
if ((self.future.ev_pen_level <= max_penlevel) and not force_storage):
try:
self.dp.calcDispatchAll()
if save_str is not None:
self.dp.df.to_csv(save_str+'_dpdf_'+result_date+'.csv', index=False)
except:
print('Error!')
pd.DataFrame({'Error':['Needed storage in dispatch'], 'Case':[save_str]}, index=[0]).to_csv(save_str+'_error_record.csv', index=False)
print('----Capacity too low----')
print('Try with storage:')
self.dp = dispatch(self.bs, self.future.demand, time_array=np.arange(52)+1, include_storage=True, return_generator_limits=return_generator_limits)
self.dp.calcDispatchAll()
if save_str is not None:
self.dp.df.to_csv(save_str+'_withstorage'+'_dpdf_'+result_date+'.csv', index=False)
self.dp.storage_df['total_demand'] = self.dp.df.demand
self.storage = StorageModel(self.dp.storage_df)
if thermal_storage:
self.storage.calculate_minbatt_forcapacity_thermal(limityear=self.year)
else:
self.storage.calculate_minbatt_forcapacity()
print('Storage Rate Result:', int(self.storage.min_maxrate))
print('Storage Capacity: ', int(self.storage.min_capacity))
if save_str is not None:
self.storage.df.to_csv(save_str+'_storage_operations_'+result_date+'.csv', index=False)
self.storage_stats = pd.DataFrame({'Storage Rate Result':int(self.storage.min_maxrate),'Storage Capacity':int(self.storage.min_capacity)}, index=[0])
if save_str is not None:
self.storage_stats.to_csv(save_str+'_storage_stats_'+result_date+'.csv', index=False)
else:
print('----Capacity too low----')
print('Try with storage:')
self.dp = dispatch(self.bs, self.future.demand, time_array=np.arange(52)+1, include_storage=True, return_generator_limits=return_generator_limits)
self.dp.calcDispatchAll()
if save_str is not None:
self.dp.df.to_csv(save_str+'_withstorage'+'_dpdf_'+result_date+'.csv', index=False)
self.storage = StorageModel(self.dp.storage_df)
if thermal_storage:
self.storage.calculate_minbatt_forcapacity_thermal(limityear=self.year)
else:
self.storage.calculate_minbatt_forcapacity()
print('Storage Rate Result:', int(self.storage.min_maxrate))
print('Storage Capacity: ', int(self.storage.min_capacity))
if save_str is not None:
self.storage.df.to_csv(save_str+'_storage_operations_'+result_date+'.csv', index=False)
self.storage_stats = pd.DataFrame({'Storage Rate Result':int(self.storage.min_maxrate),'Storage Capacity':int(self.storage.min_capacity)}, index=[0])
if save_str is not None:
self.storage_stats.to_csv(save_str+'_storage_stats_'+result_date+'.csv', index=False)
def find_capacity_limit_1_binarysearch(self, bs_limits=None, lims_8760=None, year=2035, solar=3.5, wind=3,
fuel=1.0, ev_scenario='HighHome', ev_timers='',
ev_workplace_control='', ev_workplace_bool=False, evs_bool=True,
ev_scenario_date='20220408', with_storage_before=False, cap=None,
max_rate=None, minpen=0.01, weekend_timers=None, weekend_date=None):
"""Find capacity limits. To avoid starting the search from 1% adoption each time, this method does a short
search to find which quadrant to start looking in. It returns just the 1-hour breaking point."""
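# Hedged summary of the search logic visible below: hourly limits lims_8760 are built by
# repeating each week's bidstack maximum capacity over its hours, a first probe is made at the
# midpoint penetration round((minpen + 1) / 2, 2), and the number of hours whose demand (after
# optional pre-dispatch storage) exceeds the limit decides which part of [minpen, 1] the binary
# search continues in.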
if weekend_timers is None:
weekend_timers = ev_timers
if weekend_date is None:
weekend_date = ev_scenario_date
violated1 = False
limit1 = 0
if lims_8760 is None:
lims_8760 = np.concatenate((np.repeat(bs_limits['Max Capacity'], (24*7)), np.repeat(np.array(bs_limits.loc[51, 'Max Capacity']), 24)))
print('Short Binary Search: ')
penlevel = np.round((minpen+1)/2, 2)
self.set_up_scenario(year=year, solar=solar, wind=wind, fuel=fuel, ev_scenario=ev_scenario,
ev_timers=ev_timers, ev_pen=penlevel, ev_workplace_control=ev_workplace_control,
ev_workplace_bool=ev_workplace_bool, evs_bool=evs_bool,
ev_scenario_date=ev_scenario_date, weekend_timers=weekend_timers, weekend_date=weekend_date)
self.check_overgeneration()
if with_storage_before:
self.run_storage_before_capacitydispatch(cap, max_rate, allow_negative=True)
total_overs = np.shape(np.where(self.storage.df.comb_demand_after_storage.values > lims_8760)[0])[0]
else:
total_overs = np.shape(np.where(self.future.demand.demand.values > lims_8760)[0])[0]
print(penlevel, ':', total_overs)
if total_overs == 0:
mid = copy.copy(penlevel) # 0.5
penlevel =
|
np.round((mid+1)/2, 2)
|
numpy.round
|
# -*- coding: utf-8 -*-
"""
A Python 2.7 implementation of grid-based Monte Carlo (MC) sampling for the Bayesian
inversion of a CRN depth profile considered in Laloy et al. (2017). This MC sampling
follows the procedure described in Marrero et al. (2016). Also, this "run_mc" script is
separated in "#%%" sections that can conveniently be run separately with Spyder.
@author: <NAME> <<EMAIL>>, January 2017.
License: MIT.
Please drop me an email if you have any question and/or if you find a bug in this
program.
Also, if you find this code useful please cite the paper for which it has been
developed (Laloy et al., 2017).
References:
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
Bayesian inversion of a CRN depth profile to infer Quaternary erosion of the
northwestern Campine Plateau (NE Belgium), Earth Surf. Dynam., 5, 331–345,
2017, https://doi.org/10.5194/esurf-5-331-2017.
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
Cosmogenic nuclide systematics and the CRONUScalc program, Quat. Geochronol.
31, 199-219, 2016.
"""
import os
main_dir=r'D:\CRN_ProbabilisticInversion\MC_Inversion' # Set working directory
os.chdir(main_dir)
import time
import numpy as np
import mc
#% Set rng_seed
rng_seed=1 # np.random.seed(np.floor(time.time()).astype('int'))
# Other settings
npar=3 # Erosion rate, Age, Ninh
#Bounds
lb=np.array([2.0,0.,1e4]).reshape(1,npar)
ub=np.array([60.0,1e6,9e4]).reshape(1,npar)
Prior='Prior_CRN_1'
#'Prior_CRN_1': Gaussian prior for erosion rate, uniform priors for Age and Ninh
#'Uniform': uniform prior for every parameter
sampling_strategy='Lattice' # 'Lattice' or 'LHS' (latin hypercube sampling)
ndiv=60 # Number of divisions per dimension in case of Lattice
# Total number of samples is calculated by ndiv**npar
steps=10 # ndiv # Number of sampling rounds (only useful to store intermediate results)
meas_filename='CRN_data.txt'
if __name__ == '__main__':
start_time = time.time()
q=mc.Sampler(n=npar,Prior=Prior,sampling_strategy=sampling_strategy,
ndiv=ndiv,DoParallel=False,parallel_jobs=8,rng_seed=rng_seed,
lb=lb,ub=ub,meas_filename=meas_filename,steps=steps)
print("Iterating")
tmpFilePath=None # None or main_dir+'\out_tmp.pkl'
Xi, prior, of, lik, log_lik, fx, MCPar, MCVar, Extra, Measurement = q.sample(RestartFilePath=tmpFilePath)
end_time = time.time()
print("This sampling run took %5.4f seconds." % (end_time - start_time))
#%%
#import os
#main_dir=r'D:\CRN_ProbabilisticInversion\MC_Inversion'
#os.chdir(main_dir)
#import numpy as np
import cPickle as pickle
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
with open('mc_out'+'.pkl','rb') as f:
tmp_obj=pickle.load(f)
try:
Xi=tmp_obj['Xi']
of=tmp_obj['of']
lik=tmp_obj['lik']
log_lik=tmp_obj['log_lik']
prior=tmp_obj['prior']
fx=tmp_obj['fx']
MCPar=tmp_obj['MCPar']
Measurement=tmp_obj['Measurement']
Extra=tmp_obj['Extra']
MCVar=tmp_obj['MCVar']
Modelname=tmp_obj['ModelName']
print(MCVar.Iter)
except:
pass
del tmp_obj
RecompLik=False
if RecompLik==True: # Recompute Gaussian likelihood using a different sigma_e that is consistent with the actual data misfit
sigma_e=10000
try:
del lik, log_lik
except:
pass
lik=np.zeros((Xi.shape[0],1))
log_lik=np.zeros((Xi.shape[0],1))
for ii in xrange(Xi.shape[0]):
SSR = (of[ii,0]**2)*Measurement.N# of is RMSE
log_lik[ii,0]= - ( Measurement.N / 2.0) * np.log(2.0 * np.pi) - Measurement.N * np.log( sigma_e ) - 0.5 * np.power(sigma_e,-2.0) * SSR
lik[ii,0]=(1.0/np.sqrt(2*np.pi* sigma_e**2))**Measurement.N * np.exp(- 0.5 * np.power(sigma_e,-2.0) * SSR)
#%% Check number of no-runs because the E x t limits are exceeded:
vv=Xi[:,0]*Xi[:,1]*1e-6
qq=np.where((vv<1) | (vv>35))
print('The effective number of model runs is: ',Xi.shape[0]-qq[0].size)
#%% Set zero likelihood to those runs for which E x t is out of bounds
lik[qq,0]=0
log_lik[qq,0]=-1e300
#%%
# Turn log-likelihood into a rescaled likelihood (rlik)
# Rescaling is better because of numerical underflow issues with very small likelihoods
# (yet here this only resolves the underflow for a rather small fraction of the zero-likelihood parameter sets)
ii=np.where(log_lik==np.max(log_lik));print(ii)
ii=np.where(lik==np.max(lik));print(ii)
rlik=np.exp(log_lik-np.max(log_lik))
ii=np.where(rlik==np.max(rlik));print(ii)
# Reshape prior and rlik for marginalization
prior3=np.zeros((MCPar.ndiv,MCPar.ndiv,MCPar.ndiv))
rlik3=np.zeros((MCPar.ndiv,MCPar.ndiv,MCPar.ndiv))
teller=0
for ii in xrange(0,MCPar.ndiv):
for jj in xrange(0,MCPar.ndiv):
for kk in xrange(0,MCPar.ndiv):
prior3[ii,jj,kk]=prior[teller,0]
rlik3[ii,jj,kk]=rlik[teller,0]
teller=teller+1
# Marginalize using the trapezoidal integration rule
# Calculate the posterior distribution, using the trapezoidal rule to
# estimate the evidence
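# Equivalently: evid approximates the triple integral of lik*prior over the (Ninh, Age,
# erosion-rate) grid via nested np.trapz calls, and posterior3 = rlik3*prior3/evid is the
# posterior density on that grid; the exp(-max(log_lik)) rescaling cancels in this ratio.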
Eros_rate=MCPar.pardiv[0]
Age=MCPar.pardiv[1]
Ninh=MCPar.pardiv[2]
evid=np.trapz(np.trapz(np.trapz(rlik3*prior3,Ninh,axis=2),Age,axis=1),Eros_rate,axis=0)
posterior3=rlik3*prior3/evid
posterior1=posterior3.flatten()
ii=np.where((log_lik+np.log(prior))==np.max((log_lik+np.log(prior))));print(ii)
ii=np.where(posterior1==
|
np.max(posterior1)
|
numpy.max
|
# PyVot Python Variational Optimal Transportation
# Author: <NAME> <<EMAIL>>
# Date: April 28th 2020
# Licence: MIT
import os
import sys
import torch
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from vot_torch import VWB, UVWB
import utils
np.random.seed(19)
# Generate data
mean1 = [0., -0.2]
cov1 = [[0.04, 0], [0, 0.04]]
x1, y1 = np.random.multivariate_normal(mean1, cov1, 500).T
x1 = np.stack((x1, y1), axis=1).clip(-0.99, 0.99)
mean2 = [0.5, 0.5]
cov2 = [[0.01, 0], [0, 0.01]]
x2, y2 = np.random.multivariate_normal(mean2, cov2, 200).T
x2 = np.stack((x2, y2), axis=1).clip(-0.99, 0.99)
mean3 = [-0.5, 0.5]
cov3 = [[0.01, 0], [0, 0.01]]
x3, y3 = np.random.multivariate_normal(mean3, cov3, 200).T
x3 = np.stack((x3, y3), axis=1).clip(-0.99, 0.99)
x0 = np.concatenate((x1, x2, x3), axis=0)
mean = [0.0, 0.0]
cov = [[0.02, 0], [0, 0.02]]
K = 3
x, y = np.random.multivariate_normal(mean, cov, K).T
x =
|
np.stack((x, y), axis=1)
|
numpy.stack
|
import unittest
import numpy as np
from numpy.testing import assert_array_equal
class TestArrayStats(unittest.TestCase):
def test_sum(self):
metrix = np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
self.assertEqual(
|
np.sum(metrix)
|
numpy.sum
|
"""
Geometry
========
Defines the objects related to geometrical computations:
- :func:`colour.algebra.normalise_vector`
- :func:`colour.algebra.euclidean_distance`
- :func:`colour.algebra.manhattan_distance`
- :func:`colour.algebra.extend_line_segment`
- :class:`colour.algebra.LineSegmentsIntersections_Specification`
- :func:`colour.algebra.intersect_line_segments`
- :func:`colour.algebra.ellipse_coefficients_general_form`
- :func:`colour.algebra.ellipse_coefficients_canonical_form`
- :func:`colour.algebra.point_at_angle_on_ellipse`
- :func:`colour.algebra.ellipse_fitting_Halir1998`
References
----------
- :cite:`Bourkea` : <NAME>. (n.d.). Intersection point of two line
segments in 2 dimensions. Retrieved January 15, 2016, from
http://paulbourke.net/geometry/pointlineplane/
- :cite:`Erdema` : <NAME>. (n.d.). Fast Line Segment Intersection.
Retrieved January 15, 2016, from
http://www.mathworks.com/matlabcentral/fileexchange/\
27205-fast-line-segment-intersection
- :cite:`Halir1998` : <NAME>., & <NAME>. (1998). Numerically Stable
Direct Least Squares Fitting Of Ellipses (pp. 1-8).
http://citeseerx.ist.psu.edu/viewdoc/download;\
jsessionid=BEEAFC85DE53308286D626302F4A3E3C?doi=10.1.1.1.7559&rep=rep1&type=pdf
- :cite:`Saeedna` : Saeedn. (n.d.). Extend a line segment a specific
distance. Retrieved January 16, 2016, from
http://stackoverflow.com/questions/7740507/\
extend-a-line-segment-a-specific-distance
- :cite:`Wikipedia` : Wikipedia. (n.d.). Ellipse. Retrieved November 24,
2018, from https://en.wikipedia.org/wiki/Ellipse
"""
from __future__ import annotations
import numpy as np
from dataclasses import dataclass
from colour.algebra import sdiv, sdiv_mode
from colour.hints import (
ArrayLike,
Floating,
FloatingOrArrayLike,
FloatingOrNDArray,
Literal,
NDArray,
Union,
cast,
)
from colour.utilities import (
CaseInsensitiveMapping,
as_float,
as_float_array,
ones,
tsplit,
tstack,
validate_method,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "<EMAIL>"
__status__ = "Production"
__all__ = [
"normalise_vector",
"euclidean_distance",
"manhattan_distance",
"extend_line_segment",
"LineSegmentsIntersections_Specification",
"intersect_line_segments",
"ellipse_coefficients_general_form",
"ellipse_coefficients_canonical_form",
"point_at_angle_on_ellipse",
"ellipse_fitting_Halir1998",
"ELLIPSE_FITTING_METHODS",
"ellipse_fitting",
]
def normalise_vector(a: FloatingOrArrayLike) -> FloatingOrNDArray:
"""
Normalise given vector :math:`a`.
Parameters
----------
a
Vector :math:`a` to normalise.
Returns
-------
:class:`numpy.ndarray`
Normalised vector :math:`a`.
Examples
--------
>>> a = np.array([0.20654008, 0.12197225, 0.05136952])
>>> normalise_vector(a) # doctest: +ELLIPSIS
array([ 0.8419703..., 0.4972256..., 0.2094102...])
"""
a = as_float_array(a)
with sdiv_mode():
return sdiv(a, np.linalg.norm(a))
def euclidean_distance(a: ArrayLike, b: ArrayLike) -> FloatingOrNDArray:
"""
Return the *Euclidean* distance between point array :math:`a` and point
array :math:`b`.
For a two-dimensional space, the metric is as follows:
:math:`E_D = [(x_a - x_b)^2 + (y_a - y_b)^2]^{1/2}`
Parameters
----------
a
Point array :math:`a`.
b
Point array :math:`b`.
Returns
-------
:class:`np.floating` or :class:`numpy.ndarray`
*Euclidean* distance.
Examples
--------
>>> a = np.array([100.00000000, 21.57210357, 272.22819350])
>>> b = np.array([100.00000000, 426.67945353, 72.39590835])
>>> euclidean_distance(a, b) # doctest: +ELLIPSIS
451.7133019...
"""
return as_float(
np.linalg.norm(as_float_array(a) - as_float_array(b), axis=-1)
)
def manhattan_distance(a: ArrayLike, b: ArrayLike) -> FloatingOrNDArray:
"""
Return the *Manhattan* (or *City-Block*) distance between point array
:math:`a` and point array :math:`b`.
For a two-dimensional space, the metric is as follows:
:math:`M_D = |x_a - x_b| + |y_a - y_b|`
Parameters
----------
a
Point array :math:`a`.
b
Point array :math:`b`.
Returns
-------
:class:`np.floating` or :class:`numpy.ndarray`
*Manhattan* distance.
Examples
--------
>>> a = np.array([100.00000000, 21.57210357, 272.22819350])
>>> b = np.array([100.00000000, 426.67945353, 72.39590835])
>>> manhattan_distance(a, b) # doctest: +ELLIPSIS
604.9396351...
"""
return as_float(
np.sum(np.abs(as_float_array(a) - as_float_array(b)), axis=-1)
)
def extend_line_segment(
a: ArrayLike, b: ArrayLike, distance: Floating = 1
) -> NDArray:
"""
Extend the line segment defined by point arrays :math:`a` and :math:`b` by
given distance and return the new end point.
Parameters
----------
a
Point array :math:`a`.
b
Point array :math:`b`.
distance
Distance to extend the line segment.
Returns
-------
:class:`numpy.ndarray`
New end point.
References
----------
:cite:`Saeedna`
Notes
-----
- Input line segment points coordinates are 2d coordinates.
Examples
--------
>>> a = np.array([0.95694934, 0.13720932])
>>> b = np.array([0.28382835, 0.60608318])
>>> extend_line_segment(a, b) # doctest: +ELLIPSIS
array([-0.5367248..., 1.1776534...])
"""
x_a, y_a = tsplit(a)
x_b, y_b = tsplit(b)
d = euclidean_distance(a, b)
with sdiv_mode():
x_c = x_b + sdiv(x_b - x_a, d) * distance
y_c = y_b + sdiv(y_b - y_a, d) * distance
xy_c = tstack([x_c, y_c])
return xy_c
@dataclass
class LineSegmentsIntersections_Specification:
"""
Define the specification for intersection of line segments :math:`l_1` and
:math:`l_2` returned by :func:`colour.algebra.intersect_line_segments`
definition.
Parameters
----------
xy
Array of :math:`l_1` and :math:`l_2` line segments intersections
coordinates. Non existing segments intersections coordinates are set
with `np.nan`.
intersect
Array of *bool* indicating if line segments :math:`l_1` and :math:`l_2`
intersect.
parallel
Array of :class:`bool` indicating if line segments :math:`l_1` and
:math:`l_2` are parallel.
coincident
Array of :class:`bool` indicating if line segments :math:`l_1` and
:math:`l_2` are coincident.
"""
xy: NDArray
intersect: NDArray
parallel: NDArray
coincident: NDArray
def intersect_line_segments(
l_1: ArrayLike, l_2: ArrayLike
) -> LineSegmentsIntersections_Specification:
"""
Compute :math:`l_1` line segments intersections with :math:`l_2` line
segments.
Parameters
----------
l_1
:math:`l_1` line segments array, each row is a line segment such as
(:math:`x_1`, :math:`y_1`, :math:`x_2`, :math:`y_2`) where
(:math:`x_1`, :math:`y_1`) and (:math:`x_2`, :math:`y_2`) are
respectively the start and end points of :math:`l_1` line segments.
l_2
:math:`l_2` line segments array, each row is a line segment such as
(:math:`x_3`, :math:`y_3`, :math:`x_4`, :math:`y_4`) where
(:math:`x_3`, :math:`y_3`) and (:math:`x_4`, :math:`y_4`) are
respectively the start and end points of :math:`l_2` line segments.
Returns
-------
:class:`colour.algebra.LineSegmentsIntersections_Specification`
Line segments intersections specification.
References
----------
:cite:`Bourkea`, :cite:`Erdema`
Notes
-----
- Input line segments points coordinates are 2d coordinates.
Examples
--------
>>> l_1 = np.array(
... [[[0.15416284, 0.7400497],
... [0.26331502, 0.53373939]],
... [[0.01457496, 0.91874701],
... [0.90071485, 0.03342143]]]
... )
>>> l_2 = np.array(
... [[[0.95694934, 0.13720932],
... [0.28382835, 0.60608318]],
... [[0.94422514, 0.85273554],
... [0.00225923, 0.52122603]],
... [[0.55203763, 0.48537741],
... [0.76813415, 0.16071675]]]
... )
>>> s = intersect_line_segments(l_1, l_2)
>>> s.xy # doctest: +ELLIPSIS
array([[[ nan, nan],
[ 0.2279184..., 0.6006430...],
[ nan, nan]],
<BLANKLINE>
[[ 0.4281451..., 0.5055568...],
[ 0.3056055..., 0.6279838...],
[ 0.7578749..., 0.1761301...]]])
>>> s.intersect
array([[False, True, False],
[ True, True, True]], dtype=bool)
>>> s.parallel
array([[False, False, False],
[False, False, False]], dtype=bool)
>>> s.coincident
array([[False, False, False],
[False, False, False]], dtype=bool)
"""
l_1 = np.reshape(l_1, (-1, 4))
l_2 = np.reshape(l_2, (-1, 4))
r_1, c_1 = l_1.shape[0], l_1.shape[1]
r_2, c_2 = l_2.shape[0], l_2.shape[1]
x_1, y_1, x_2, y_2 = (
np.tile(l_1[:, i, np.newaxis], (1, r_2)) for i in range(c_1)
)
l_2 = np.transpose(l_2)
x_3, y_3, x_4, y_4 = (np.tile(l_2[i, :], (r_1, 1)) for i in range(c_2))
x_4_x_3 = x_4 - x_3
y_1_y_3 = y_1 - y_3
y_4_y_3 = y_4 - y_3
x_1_x_3 = x_1 - x_3
x_2_x_1 = x_2 - x_1
y_2_y_1 = y_2 - y_1
numerator_a = x_4_x_3 * y_1_y_3 - y_4_y_3 * x_1_x_3
numerator_b = x_2_x_1 * y_1_y_3 - y_2_y_1 * x_1_x_3
denominator = y_4_y_3 * x_2_x_1 - x_4_x_3 * y_2_y_1
with sdiv_mode("Ignore"):
u_a = cast(NDArray, sdiv(numerator_a, denominator))
u_b = cast(NDArray, sdiv(numerator_b, denominator))
intersect = np.logical_and.reduce((u_a >= 0, u_a <= 1, u_b >= 0, u_b <= 1))
xy = tstack([x_1 + x_2_x_1 * u_a, y_1 + y_2_y_1 * u_a])
xy[~intersect] = np.nan
parallel = denominator == 0
coincident = np.logical_and.reduce(
(numerator_a == 0, numerator_b == 0, parallel)
)
return LineSegmentsIntersections_Specification(
xy, intersect, parallel, coincident
)
def ellipse_coefficients_general_form(coefficients: ArrayLike) -> NDArray:
"""
Return the general form ellipse coefficients from given canonical form
ellipse coefficients.
The canonical form ellipse coefficients are as follows: the center
coordinates :math:`x_c` and :math:`y_c`, semi-major axis length
:math:`a_a`, semi-minor axis length :math:`a_b` and rotation angle
:math:`\\theta` in degrees of its semi-major axis :math:`a_a`.
Parameters
----------
coefficients
Canonical form ellipse coefficients.
Returns
-------
:class:`numpy.ndarray`
General form ellipse coefficients.
References
----------
:cite:`Wikipedia`
Examples
--------
>>> coefficients = np.array([0.5, 0.5, 2, 1, 45])
>>> ellipse_coefficients_general_form(coefficients)
array([ 2.5, -3. , 2.5, -1. , -1. , -3.5])
"""
x_c, y_c, a_a, a_b, theta = tsplit(coefficients)
theta = np.radians(theta)
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
cos_theta_2 = cos_theta**2
sin_theta_2 = sin_theta**2
a_a_2 = a_a**2
a_b_2 = a_b**2
a = a_a_2 * sin_theta_2 + a_b_2 * cos_theta_2
b = 2 * (a_b_2 - a_a_2) * sin_theta * cos_theta
c = a_a_2 * cos_theta_2 + a_b_2 * sin_theta_2
d = -2 * a * x_c - b * y_c
e = -b * x_c - 2 * c * y_c
f = a * x_c**2 + b * x_c * y_c + c * y_c**2 - a_a_2 * a_b_2
return np.array([a, b, c, d, e, f])
def ellipse_coefficients_canonical_form(coefficients: ArrayLike) -> NDArray:
"""
Return the canonical form ellipse coefficients from given general form
ellipse coefficients.
The general form ellipse coefficients are the coefficients of the implicit
second-order polynomial/quadratic curve expressed as follows:
    :math:`F\\left(x, y\\right) = ax^2 + bxy + cy^2 + dx + ey + f = 0`
    with an ellipse-specific constraint such as :math:`b^2 - 4ac < 0` and where
    :math:`a, b, c, d, e, f` are coefficients of the ellipse and :math:`x, y`
    are the coordinates of points lying on it.
Parameters
----------
coefficients
General form ellipse coefficients.
Returns
-------
:class:`numpy.ndarray`
Canonical form ellipse coefficients.
References
----------
:cite:`Wikipedia`
Examples
--------
>>> coefficients = np.array([ 2.5, -3.0, 2.5, -1.0, -1.0, -3.5])
>>> ellipse_coefficients_canonical_form(coefficients)
array([ 0.5, 0.5, 2. , 1. , 45. ])
"""
a, b, c, d, e, f = tsplit(coefficients)
d_1 = b**2 - 4 * a * c
n_p_1 = 2 * (a * e**2 + c * d**2 - b * d * e + d_1 * f)
n_p_2 = np.sqrt((a - c) ** 2 + b**2)
a_a = (-np.sqrt(n_p_1 * (a + c + n_p_2))) / d_1
a_b = (-np.sqrt(n_p_1 * (a + c - n_p_2))) / d_1
x_c = (2 * c * d - b * e) / d_1
y_c = (2 * a * e - b * d) / d_1
theta = np.select(
[
np.logical_and(b == 0, a < c),
np.logical_and(b == 0, a > c),
b != 0,
],
[
0,
90,
np.degrees(np.arctan((c - a - n_p_2) / b)),
],
)
return np.array([x_c, y_c, a_a, a_b, theta])
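# Round-trip sketch (illustrative, mirroring the doctests above): converting
# canonical coefficients to the general form and back recovers the input.
# >>> canonical = np.array([0.5, 0.5, 2, 1, 45])
# >>> general = ellipse_coefficients_general_form(canonical)
# >>> np.allclose(ellipse_coefficients_canonical_form(general), canonical)
# True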
def point_at_angle_on_ellipse(
phi: ArrayLike, coefficients: ArrayLike
) -> NDArray:
"""
Return the coordinates of the point at angle :math:`\\phi` in degrees on
the ellipse with given canonical form coefficients.
Parameters
----------
phi
Point at angle :math:`\\phi` in degrees to retrieve the coordinates
of.
coefficients
        Canonical form ellipse coefficients as follows: the center coordinates
:math:`x_c` and :math:`y_c`, semi-major axis length :math:`a_a`,
semi-minor axis length :math:`a_b` and rotation angle :math:`\\theta`
in degrees of its semi-major axis :math:`a_a`.
Returns
-------
:class:`numpy.ndarray`
Coordinates of the point at angle :math:`\\phi`
Examples
--------
>>> coefficients = np.array([0.5, 0.5, 2, 1, 45])
>>> point_at_angle_on_ellipse(45, coefficients) # doctest: +ELLIPSIS
array([ 1., 2.])
"""
    phi = np.radians(phi)
from spear import *
import numpy.random as rnd
import matplotlib.pyplot as plt
import numpy
### CONSTANTS
a = 0.5
az0 = 0.75
az12 = 0.75
az23 = 0.75
g = 9.81
t_step = 0.1 #duration of a tick in seconds
q_max = 6.0
q_step = q_max/5.0
q_med = q_max/2.0
l_max = 20.0
l_min = 0.0
l_goal = 10.0
delta_l = 0.5
epsilon = 0.3
q2_dev = 0.5
### ENVIRONMENT EVOLUTION
def compute_flow_rate(x1, x2, a, a12):
if x1 > x2:
return a12*a*numpy.sqrt(2*g)*numpy.sqrt(x1-x2)
else:
return -a12*a*numpy.sqrt(2*g)*numpy.sqrt(x2-x1)
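# Illustrative property (not in the original script): the flow rate is
# antisymmetric in the two levels, so swapping the tanks flips the sign.
# compute_flow_rate(x1, x2, a, a12) == -compute_flow_rate(x2, x1, a, a12)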
def compute_q12(ds):
l1 = ds['l1']
l2 = ds['l2']
return compute_flow_rate(l1, l2, a, az12)
def compute_q23(ds):
l2 = ds['l2']
l3 = ds['l3']
return compute_flow_rate(l2, l3, a, az23)
def Env_scenario1(ds):
newds = ds.copy()
q1 = ds['q1']
q2 = ds['q2']
q3 = ds['q3']
newds['q2'] = max(0.0, rnd.normal(q_med, q2_dev))
q12 = compute_q12(ds)
q23 = compute_q23(ds)
newds['l1'] = max(0.0 , ds['l1'] + q1*t_step - q12*t_step)
newds['l2'] = max(0.0 , ds['l2'] + q12*t_step - q23*t_step)
newds['l3'] = max(0.0 , ds['l3'] + q2*t_step + q23*t_step - q3*t_step)
return newds
def Env_scenario2(ds):
newds = ds.copy()
q1 = ds['q1']
q2 = ds['q2']
q3 = ds['q3']
newds['q2'] = min( max(0.0, q2 + rnd.normal(0,1)), q_max)
q12 = compute_q12(ds)
q23 = compute_q23(ds)
newds['l1'] = max(0.0 , ds['l1'] + q1*t_step - q12*t_step)
newds['l2'] = max(0.0 , ds['l2'] + q12*t_step - q23*t_step)
newds['l3'] = max(0.0 , ds['l3'] + q2*t_step + q23*t_step - q3*t_step)
return newds
### PENALTY FUNCTIONS
def rho_fun(x):
v = abs(x-l_goal)/max(l_max-l_goal,l_goal-l_min)
return v
def ranking_function_1(i, ds):
return rho_fun(ds['l1'])
def ranking_function_2(i, ds):
return rho_fun(ds['l2'])
def ranking_function_3(i, ds):
return rho_fun(ds['l3'])
def ranking_function_max(i, ds):
return max(rho_fun(ds['l1']),rho_fun(ds['l2']),rho_fun(ds['l3']))
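# Illustrative values (not part of the original script): with l_goal = 10,
# l_min = 0 and l_max = 20, the penalty is 0 at the goal level and 1 at either
# extreme of the tank.
# rho_fun(l_goal) == 0.0
# rho_fun(l_min) == rho_fun(l_max) == 1.0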
### ADDITIONAL FUNCTIONS
def plot_tanks_trajectory(k, trj, title, file):
fix, ax = plt.subplots()
ax.plot(range(0, k), [ds['l1'] for ds in trj], label='Tank 1')
ax.plot(range(0, k), [ds['l2'] for ds in trj], label='Tank 2')
ax.plot(range(0, k), [ds['l3'] for ds in trj], label='Tank 3')
ax.plot(range(0,k),[[10] for i in range(k)], '--')
legend = ax.legend()
plt.title(title)
plt.savefig(file)
plt.show()
def plot_tanks_traj_l1(k, trj1, trj2, title, file):
fix, ax = plt.subplots()
ax.plot(range(0, k), [ds['l1'] for ds in trj1], label='Scen 1')
ax.plot(range(0, k), [ds['l1'] for ds in trj2], label='Scen 2')
ax.plot(range(0,k),[[10] for i in range(k)], '--')
legend = ax.legend()
plt.title(title)
plt.savefig(file)
plt.show()
def plot_tanks_traj_l2(k, trj1, trj2, title, file):
fix, ax = plt.subplots()
ax.plot(range(0, k), [ds['l2'] for ds in trj1], label='Scen 1')
ax.plot(range(0, k), [ds['l2'] for ds in trj2], label='Scen 2')
ax.plot(range(0,k),[[10] for i in range(k)], '--')
legend = ax.legend()
plt.title(title)
plt.savefig(file)
plt.show()
def plot_tanks_traj_l3(k, trj1, trj2, title, file):
fix, ax = plt.subplots()
ax.plot(range(0, k), [ds['l3'] for ds in trj1], label='Scen 1')
ax.plot(range(0, k), [ds['l3'] for ds in trj2], label='Scen 2')
ax.plot(range(0,k),[[10] for i in range(k)], '--')
legend = ax.legend()
plt.title(title)
plt.savefig(file)
plt.show()
def plot_tanks_3runs(k, trj1, trj2, trj3, title, file):
fix, ax = plt.subplots()
ax.plot(range(0, k), [ds['l3'] for ds in trj1], label='0.5')
ax.plot(range(0, k), [ds['l3'] for ds in trj2], label='0.3')
ax.plot(range(0, k), [ds['l3'] for ds in trj3], label='0.7')
ax.plot(range(0,k),[[10] for i in range(k)], '--')
legend = ax.legend()
plt.title(title)
plt.savefig(file)
plt.show()
### PROCESSES
processes = {
'Pin': if_then_else_process(lambda d: d['l1'] > l_goal + d['delta_l'],
act_process({'q1': lambda d: max(0.0, d['q1'] - q_step)}, 'Pin'),
if_then_else_process(lambda d: d['l1'] < l_goal - d['delta_l'],
act_process({'q1': lambda d: min(q_max, d['q1'] + q_step)}, 'Pin'),
act_process({}, 'Pin'))),
'Pout': if_then_else_process(lambda d: d['l3'] > l_goal + d['delta_l'],
act_process({'q3': lambda d: min(q_max, d['q3'] + q_step)}, 'Pout'),
if_then_else_process(lambda d: d['l3'] < l_goal - d['delta_l'],
act_process({'q3': lambda d: max(0.0, d['q3'] - q_step)}, 'Pout'),
act_process({},'Pout')))
}
PTanks = synch_parallel_process(processes['Pin'], processes['Pout'])
def init_ds(q1, q2, q3, l1, l2, l3, delta_l):
return {'q1': q1, 'q2': q2, 'q3': q3, 'l1': l1, 'l2': l2, 'l3': l3, 'delta_l': delta_l}
### SIMULATIONS
ds_basic = init_ds(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, delta_l)
k = 151
n = 1000
l = 5
trj1 = run(processes, Env_scenario1, PTanks, ds_basic, k)
trj2 = run(processes, Env_scenario2, PTanks, ds_basic, k)
plot_tanks_trajectory(k,trj1,"Variation of the level of water in time (Scenario 1)","tank_level_sim_scen1.png")
plot_tanks_trajectory(k,trj2,"Variation of the level of water in time (Scenario 2)","tank_level_sim_scen2.png")
plot_tanks_traj_l1(k,trj1,trj2,"Comparison of the variation in time of l1","tank_sim_1.png")
plot_tanks_traj_l2(k,trj1,trj2,"Comparison of the variation in time of l2","tank_sim_2.png")
plot_tanks_traj_l3(k,trj1,trj2,"Comparison of the variation in time of l3","tank_sim_3.png")
for samples in [ 100, 1000, 10000 ]:
simdata1 = simulate(processes, Env_scenario1, PTanks, ds_basic, k, samples)
simdata2 = simulate(processes, Env_scenario2, PTanks, ds_basic, k, samples)
plot_histogram_double(simdata1, simdata2, [50], lambda d: d['l1'], 7.0, 13.0, 100, "l1, N="+str(samples)+", ", "comp_l1_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [50], lambda d: d['l2'], 7.0, 13.0, 100, "l2, N="+str(samples)+", ", "comp_l2_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [50], lambda d: d['l3'], 7.0, 13.0, 100, "l3, N="+str(samples)+", ", "comp_l3_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [100], lambda d: d['l1'], 8.0, 12.0, 100, "l1, N="+str(samples)+", ", "comp_l1_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [100], lambda d: d['l2'], 8.0, 12.0, 100, "l2, N="+str(samples)+", ", "comp_l2_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [100], lambda d: d['l3'], 8.0, 12.0, 100, "l3, N="+str(samples)+", ", "comp_l3_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [150], lambda d: d['l1'], 8.0, 12.0, 100, "l1, N="+str(samples)+", ", "comp_l1_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [150], lambda d: d['l2'], 8.0, 12.0, 100, "l2, N="+str(samples)+", ", "comp_l2_"+str(samples)+"_")
plot_histogram_double(simdata1, simdata2, [150], lambda d: d['l3'], 8.0, 12.0, 100, "l3, N="+str(samples)+", ", "comp_l3_"+str(samples)+"_")
estdata1_n = simulate(processes, Env_scenario1, PTanks, ds_basic, k, n)
estdata1_nl = simulate(processes, Env_scenario1, PTanks, ds_basic, k, n*l)
estdata2_n = simulate(processes, Env_scenario2, PTanks, ds_basic, k, n)
estdata2_nl = simulate(processes, Env_scenario2, PTanks, ds_basic, k, n*l)
### EVALUATION OF DISTANCES DIFFERENT ENVIRONMENTS
(evmet_12_rho3, pointdist_12_rho3) = distance(processes,PTanks,ds_basic,Env_scenario1,processes,PTanks,ds_basic,Env_scenario2,k,n,l,ranking_function_3)
print("Distance scen1-scen2: "+str(evmet_12_rho3[0]))
fix, ax = plt.subplots()
ax.plot(range(k),evmet_12_rho3,'r.')
ax.plot(range(k),pointdist_12_rho3,'b-')
plt.title("Distance modulo rho_3 scenarios 1-2 N="+str(n)+", l="+str(l))
plt.savefig("distance_scen1-scen2_newest.png")
plt.show()
(evmet_21_rho3, pointdist_21_rho3) = distance(processes,PTanks,ds_basic,Env_scenario2,processes,PTanks,ds_basic,Env_scenario1,k,n,l,ranking_function_3)
print("Distance scen2-scen1: "+str(evmet_21_rho3[0]))
fix, ax = plt.subplots()
ax.plot(range(k),evmet_21_rho3,'r.')
ax.plot(range(k),pointdist_21_rho3,'b-')
plt.title("Distance modulo rho_3 scenarios 2-1 N="+str(n)+", l="+str(l))
plt.savefig("distance_scen2-scen1_newest.png")
plt.show()
(evmet_12_rho1, pointdist_12_rho1) = distance(processes,PTanks,ds_basic,Env_scenario1,processes,PTanks,ds_basic,Env_scenario2,k,n,l,ranking_function_1)
(evmet_12_rho2, pointdist_12_rho2) = distance(processes,PTanks,ds_basic,Env_scenario1,processes,PTanks,ds_basic,Env_scenario2,k,n,l,ranking_function_2)
(evmet_12_rhoM, pointdist_12_rhoM) = distance(processes,PTanks,ds_basic,Env_scenario1,processes,PTanks,ds_basic,Env_scenario2,k,n,l,ranking_function_max)
fix, ax = plt.subplots()
ax.plot(range(k),evmet_12_rho1,label="rho^l1")
ax.plot(range(k),evmet_12_rho2,label="rho^l2")
ax.plot(range(k),evmet_12_rho3,label="rho^l3")
ax.plot(range(k),evmet_12_rhoM,label="rho^max")
legend = ax.legend()
plt.title("Evolution metric wrt different penalty functions N="+str(n)+", l="+str(l))
plt.savefig("ev_distance_rho_scen1-scen2_basic.png")
plt.show()
fix, ax = plt.subplots()
ax.plot(range(k),pointdist_12_rho1,label="rho^l1")
ax.plot(range(k),pointdist_12_rho2,label="rho^l2")
ax.plot(range(k),pointdist_12_rho3,label="rho^l3")
ax.plot(range(k),pointdist_12_rhoM,label="rho^max")
legend = ax.legend()
plt.title("Pointiwise distance wrt different penalty functions N="+str(n)+",l="+str(l))
plt.savefig("pt_distance_rho_scen1-scen2_basic.png")
plt.show()
(evmet_21_rho1, pointdist_21_rho1) = distance(processes,PTanks,ds_basic,Env_scenario2,processes,PTanks,ds_basic,Env_scenario1,k,n,l,ranking_function_1)
(evmet_21_rho2, pointdist_21_rho2) = distance(processes,PTanks,ds_basic,Env_scenario2,processes,PTanks,ds_basic,Env_scenario1,k,n,l,ranking_function_2)
(evmet_21_rhoM, pointdist_21_rhoM) = distance(processes,PTanks,ds_basic,Env_scenario2,processes,PTanks,ds_basic,Env_scenario1,k,n,l,ranking_function_max)
fix, ax = plt.subplots()
ax.plot(range(k),evmet_21_rho1,label="rho^l1")
ax.plot(range(k),evmet_21_rho2,label="rho^l2")
ax.plot(range(k),evmet_21_rho3,label="rho^l3")
ax.plot(range(k),evmet_21_rhoM,label="rho^max")
legend = ax.legend()
plt.title("Evolution metric wrt different penalty functions N="+str(n)+",l="+str(l))
plt.savefig("ev_distance_rho_scen2-scen1_basic.png")
plt.show()
fix, ax = plt.subplots()
ax.plot(range(k),pointdist_21_rho1,label="rho^l1")
ax.plot(range(k),pointdist_21_rho2,label="rho^l2")
ax.plot(range(k),pointdist_21_rho3,label="rho^l3")
ax.plot(range(k),pointdist_21_rhoM,label="rho^max")
legend = ax.legend()
plt.title("Pointiwise distance wrt different penalty functions N="+str(n)+",l="+str(l))
plt.savefig("pt_distance_rho_scen2-scen1_basic.png")
plt.show()
### EVALUATION OF DISTANCES DIFFERENT DELTAS
delta_l_less = 0.3
delta_l_more = 0.7
ds_start_less = init_ds(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, delta_l_less)
ds_start_more = init_ds(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, delta_l_more)
run_less = run(processes, Env_scenario1, PTanks, ds_start_less, k)
run_more = run(processes, Env_scenario1, PTanks, ds_start_more, k)
run_normal = run(processes, Env_scenario1, PTanks, ds_basic, k)
plot_tanks_3runs(k,run_normal,run_less,run_more,"Variation of l3 modulo different delta_l","deltas_scen1.png")
estless_n = simulate(processes, Env_scenario1, PTanks, ds_start_less, k, n)
estmore_nl = simulate(processes, Env_scenario1, PTanks, ds_start_more, k, n*l)
(evmet_32_rho3, pointdist_32_rho3) = compute_distance(estless_n,estdata1_nl,k,n,l,ranking_function_3)
print("Distance 0.3-0.5: "+str(evmet_32_rho3[0]))
(evmet_24_rho3, pointdist_24_rho3) = compute_distance(estdata1_n,estmore_nl,k,n,l,ranking_function_3)
print("Distance 0.5-0.7: "+str(evmet_24_rho3[0]))
(evmet_34_rho3, pointdist_34_rho3) = compute_distance(estless_n,estmore_nl,k,n,l,ranking_function_3)
print("Distance 0.3-0.7: "+str(evmet_34_rho3[0]))
fix, ax = plt.subplots()
ax.plot(range(k),evmet_32_rho3, label='0.3,0.5')
ax.plot(range(k),evmet_24_rho3, label='0.5,0.7')
ax.plot(range(k),evmet_34_rho3, label='0.3,0.7')
legend=ax.legend()
plt.title("Evolution metric with delta_l = 0.3,0.5,0.7, N="+str(n)+",l="+str(l))
plt.savefig("ev_distance_scen1_deltas.png")
plt.show()
fix, ax = plt.subplots()
ax.plot(range(k),pointdist_32_rho3, label='0.3,0.5')
ax.plot(range(k),pointdist_24_rho3, label='0.5,0.7')
ax.plot(range(k),pointdist_34_rho3, label='0.3,0.7')
legend=ax.legend()
plt.title("Pointwise distance with delta_l = 0.3,0.5,0.7, N="+str(n)+",l="+str(l))
plt.savefig("pt_distance_scen1_deltas.png")
plt.show()
### EVALUATION OF ROBUSTNESS
def robust_variation(simdata,ds,scale,size,t1,t2,pdef,p,e,k,n,l,rho):
v = scale*max(l_max-l_goal,l_goal-l_min)
res = []
c=0
sdata = simdata[t1:t2+1]
t = t2-t1+1
for j in range(0,size):
d = ds.copy()
d['l1'] = rnd.uniform(l_min,l_max)
d['l2'] = rnd.uniform(l_min,l_max)
d['l3'] = max(0.0, min(ds['l3'] + rnd.uniform(-v,v),l_max))
        d['q1'] = rnd.uniform(0.0, q_max)
#!/usr/bin/env python
"""
analysis.py
Methods to assist in the analysis of ROMS fields
Written by <NAME> on 05/24/15
Copyright (c)2019 University of Hawaii under the MIT-License.
"""
import numpy as np
from warnings import warn
from joblib import Parallel, delayed
import seapy
import netCDF4
def __find_surface_thread(grid, field, value, zeta, const_depth=False,
k_values=False, u_grid=False, v_grid=False):
"""
Internal function to find a value in field_a and return the
values from field_b at the same positions.
"""
depth = seapy.roms.depth(grid.vtransform, grid.h, grid.hc,
grid.s_rho, grid.cs_r, zeta)
if u_grid:
depth = seapy.model.rho2u(depth)
elif v_grid:
depth = seapy.model.rho2v(depth)
# Set the variables based on what we are finding
if const_depth:
field_a, field_b = depth, field
else:
field_a, field_b = field, depth
# Determine the upper and lower bounds of the value in the field
tmp = np.ma.masked_equal(
np.diff(((field_a - value) < 0).astype(np.short), axis=0), 0)
factor = -np.sign(np.mean(np.diff(field, axis=0))).astype(np.short)
# Determine the points of the upper bound and the lower bound
bad = np.sum(tmp, axis=0).astype(bool)
k_ones = np.arange(grid.n, dtype=np.short)
upper = (k_ones[:, np.newaxis, np.newaxis] ==
np.argmax(np.abs(tmp), axis=0)) * bad
k_ones = np.arange(grid.n, dtype=np.short) - factor
lower = (k_ones[:, np.newaxis, np.newaxis] ==
np.argmax(np.abs(tmp), axis=0)) * bad
# Now that we have the bounds, we can linearly interpolate to
# find where the value lies
u_a = np.sum(field_a * upper, axis=0)
d_a = u_a - np.sum(field_a * lower, axis=0)
d_z = (u_a - value) / d_a
if k_values:
return np.argmax(upper, axis=0) + factor * d_z
# Calculate the values from field_b
u_b = np.sum(field_b * upper, axis=0)
d_b = u_b - np.sum(field_b * lower, axis=0)
return u_b - d_b * d_z
def constant_depth(field, grid, depth, zeta=None, threads=2):
"""
Find the values of a 3-D field at a constant depth for all times given.
Parameters
----------
field : ndarray,
ROMS 3-D field to interpolate onto a constant depth level. If 4-D, it
will calculate through time.
grid : seapy.model.grid or string or list,
Grid that defines the depths and stretching for the field given
depth : float,
Depth (in meters) to find all values
zeta : ndarray, optional,
ROMS zeta field corresponding to field if you wish to apply the SSH
correction to the depth calculations.
threads : int, optional,
Number of threads to use for processing
Returns
-------
nfield : ndarray,
Values from ROMS field on the given constant depth
"""
grid = seapy.model.asgrid(grid)
field = np.ma.masked_invalid(field, copy=False)
    if depth is None:
        warn("Error: no depth was given.")
        return
    depth = depth if depth < 0 else -depth
    if not grid.depth_rho.min() <= depth <= grid.depth_rho.max():
        warn("Error: {:f} is out of range for the depth.".format(depth))
        return
if np.ndim(field) == 3:
field = seapy.adddim(field)
nt = field.shape[0]
threads = np.minimum(nt, threads)
if zeta is None:
zeta = np.zeros((nt, 1, 1))
if np.ndim(zeta) == 2:
zeta = seapy.adddim(zeta, nt)
v_grid = u_grid = False
    if field.shape[-2:] == grid.mask_u.shape:
        u_grid = True
    elif field.shape[-2:] == grid.mask_v.shape:
        v_grid = True
return np.ma.array(Parallel(n_jobs=threads, verbose=2)
(delayed(__find_surface_thread)
(grid, field[i, :], depth, zeta[i, :],
const_depth=True, u_grid=u_grid, v_grid=v_grid)
for i in range(nt)), copy=False)
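# Usage sketch (illustrative; the file and variable names are assumptions, chosen
# to match the example in the transect() docstring further below):
# >>> nc = seapy.netcdf('roms_his.nc')
# >>> grid = seapy.model.asgrid(nc)
# >>> temp = nc.variables['temp'][:]
# >>> zeta = nc.variables['zeta'][:]
# >>> t100 = constant_depth(temp, grid, 100, zeta=zeta)  # field at 100 m depth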
def constant_value(field, grid, value, zeta=None, threads=2):
"""
Find the depth of the value across the field. For example, find the depth
of a given isopycnal if the field is density.
Parameters
----------
field : ndarray,
ROMS 3-D field to interpolate onto a constant depth level. If 4-D, it
will calculate through time.
grid : seapy.model.grid or string or list,
Grid that defines the depths and stretching for the field given
value : float,
Value to find the depths for in same units as the 'field'
zeta : ndarray, optional,
ROMS zeta field corresponding to field if you wish to apply the SSH
correction to the depth calculations.
threads : int, optional,
Number of threads to use for processing
Returns
-------
nfield : ndarray,
Depths from ROMS field on the given value
"""
grid = seapy.model.asgrid(grid)
field = np.ma.masked_invalid(field, copy=False)
    if value is None or not field.min() <= value <= field.max():
        warn("Error: {} is out of range for the field.".format(value))
        return
if np.ndim(field) == 3:
field = seapy.adddim(field)
nt = field.shape[0]
threads = np.minimum(nt, threads)
if zeta is None:
zeta = np.zeros((nt, 1, 1))
if np.ndim(zeta) == 2:
zeta = seapy.adddim(zeta, nt)
v_grid = u_grid = False
    if field.shape[-2:] == grid.mask_u.shape:
        u_grid = True
    elif field.shape[-2:] == grid.mask_v.shape:
        v_grid = True
return np.ma.array(Parallel(n_jobs=threads, verbose=2)
(delayed(__find_surface_thread)
(grid, field[i, :], value, zeta[i, :],
u_grid=u_grid, v_grid=v_grid)
for i in range(nt)), copy=False)
def constant_value_k(field, grid, value, zeta=None, threads=2):
"""
Find the layer number of the value across the field. For example, find the k
of a given isopycnal if the field is density.
Parameters
----------
field : ndarray,
ROMS 3-D field to interpolate onto a constant depth level. If 4-D, it
will calculate through time.
grid : seapy.model.grid or string or list,
Grid that defines the depths and stretching for the field given
value : float,
Value to find the depths for in same units as the 'field'
threads : int, optional,
Number of threads to use for processing
Returns
-------
nfield : ndarray,
Depths from ROMS field on the given value
"""
grid = seapy.model.asgrid(grid)
field = np.ma.masked_invalid(field, copy=False)
    if value is None or not field.min() <= value <= field.max():
        warn("Error: {} is out of range for the field.".format(value))
        return
if np.ndim(field) == 3:
field = seapy.adddim(field)
nt = field.shape[0]
threads = np.minimum(nt, threads)
if zeta is None:
zeta = np.zeros((nt, 1, 1))
if np.ndim(zeta) == 2:
zeta = seapy.adddim(zeta, nt)
v_grid = u_grid = False
    if field.shape[-2:] == grid.mask_u.shape:
        u_grid = True
    elif field.shape[-2:] == grid.mask_v.shape:
        v_grid = True
return np.ma.array(Parallel(n_jobs=threads, verbose=2)
(delayed(__find_surface_thread)
(grid, field[i, :], value, zeta[i, :],
k_values=True, u_grid=u_grid, v_grid=v_grid)
for i in range(nt)), copy=False)
def depth_average(field, grid, bottom, top, zeta=None):
"""
Compute the depth-averaged field down to the depth specified. NOTE:
This just finds the nearest layer, so at every grid cell, it may not be
exactly the specified depth.
Parameters
----------
field : ndarray,
ROMS 3-D field to integrate from a depth level. Must be
three-dimensional array (single time).
grid : seapy.model.grid or string or list,
Grid that defines the depths and stretching for the field given
bottom : float,
Depth (in meters) to integrate from
top : float,
Depth (in meters) to integrate to
zeta : ndarray, optional,
ROMS zeta field corresponding to field if you wish to apply the SSH
correction to the depth calculations.
Returns
-------
ndarray,
Values from depth integrated ROMS field
"""
grid = seapy.model.asgrid(grid)
bottom = bottom if bottom < 0 else -bottom
top = top if top < 0 else -top
if bottom > top:
bottom, top = top, bottom
drange = top - bottom
# If we have zeta, we need to compute thickness
if zeta is not None:
s_w, cs_w = seapy.roms.stretching(grid.vstretching, grid.theta_s,
grid.theta_b, grid.hc,
grid.n, w_grid=True)
depths = np.ma.masked_equal(seapy.roms.depth(
grid.vtransform, grid.h, grid.hc, grid.s_rho, grid.cs_r) *
grid.mask_rho, 0)
thickness = np.ma.masked_array(seapy.roms.thickness(
grid.vtransform, grid.h, grid.hc, s_w, cs_w, zeta) *
grid.mask_rho, 0)
else:
depths = np.ma.masked_equal(grid.depth_rho * grid.mask_rho, 0)
thickness = np.ma.masked_equal(grid.thick_rho * grid.mask_rho, 0)
# If we are on u- or v-grid, transform
if field.shape == grid.thick_u.shape:
depths = seapy.model.rho2u(depths)
thickness = seapy.model.rho2u(thickness)
elif field.shape == grid.thick_v.shape:
depths = seapy.model.rho2v(depths)
thickness = seapy.model.rho2v(thickness)
# 1. pick all of the points that are deeper and shallower than the limits
k_ones = np.arange(grid.n, dtype=int)
    top_depth = depths[-1, :, :] if top == 0 else top
    upper = depths - top_depth
    upper[np.where(upper < 0)] = np.inf
    lower = depths - bottom
    lower[np.where(lower > 0)] = -np.inf
thickness *= np.ma.masked_equal(np.logical_and(
k_ones[:, np.newaxis, np.newaxis] <= np.argmin(upper, axis=0),
k_ones[:, np.newaxis, np.newaxis] >=
np.argmax(lower, axis=0)).astype(int), 0)
# Do the integration
return np.sum(field * thickness, axis=0) / \
np.sum(thickness, axis=0)
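# Usage sketch (illustrative; variable names are assumptions, following the same
# conventions as the transect() docstring below): average salinity over 0-100 m.
# >>> nc = seapy.netcdf('roms_his.nc')
# >>> grid = seapy.model.asgrid(nc)
# >>> salt = nc.variables['salt'][0, :]   # single time step, 3-D field
# >>> s_avg = depth_average(salt, grid, bottom=100, top=0)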
def transect(lon, lat, depth, data, nx=200, nz=40, z=None):
"""
Generate an equidistant transect from data at varying depths. Can be
used to plot a slice of model or observational data.
Parameters
----------
lat: array
n-dimensional array with latitude of points
lon: array
n-dimensional array with longitude of points
depth: array
[k,n] dimensional array of the depths for all k-layers at each n point
data: array
[k,n] dimensional array of values
nx: int, optional
number of horizontal points desired in the transect
nz: int, optional
number of vertical points desired in the transect
z: array, optional
list of depths to use if you do not want equidistant depths
Returns
-------
x: array
x-location values in [m] along transect
z: array
depth values in [m] of the new transect
vals: np.ma.array
data values of the new transect with masked values
Examples
--------
Generate series of transects from ROMS output
>>> nc = seapy.netcdf('roms_his.nc')
>>> grid = seapy.model.asgrid(nc)
>>> data = nc.variables['salt'][:,:,150,:]
>>> shp = (data.shape[0], 50, 400)
>>> transect = np.zeros(shp)
>>> for i in range(shp[0]):
>>> x, z, d = \
>>> seapy.roms.analysis.transect(grid.lon_rho[150,:],
>>> grid.lat_rho[150,:],
>>> grid.depth_rho[:,150,:],
>>> data[i,:,:], nx=shp[2],
>>> nz=shp[1])
>>> transect[i,:,:] = d.filled(np.nan)
>>> nc.close()
>>> plt.pcolormesh(x/1000, z, transect[0, :, :])
"""
from scipy.interpolate import griddata
depth = np.atleast_2d(depth)
    data = np.ma.atleast_2d(data).filled(np.mean(data))
import keyboard
from utils.custom_mouse import mouse
from char import IChar
import template_finder
from template_finder import TemplateMatch
from pather import Pather
from logger import Logger
from screen import grab, convert_abs_to_monitor, convert_screen_to_abs
from config import Config
from utils.misc import wait, rotate_vec, unit_vector
import random
from pather import Location, Pather
import numpy as np
import time
import os
from ui_manager import wait_until_visible, ScreenObjects
class Necro(IChar):
def __init__(self, skill_hotkeys: dict, pather: Pather):
os.system('color')
Logger.info("\033[94m<<Setting up Necro>>\033[0m")
super().__init__(skill_hotkeys)
self._pather = pather
#custom necro pathing for pindle
self._pather.adapt_path((Location.A5_PINDLE_START, Location.A5_PINDLE_SAFE_DIST), [100,101])
self._pather.adapt_path((Location.A5_PINDLE_SAFE_DIST, Location.A5_PINDLE_END), [104])
#minor offsets to pindle fight locations
self._pather.offset_node(102, [15, 0])
self._pather.offset_node(103, [15, 0])
self._pather.offset_node(101, [100,-5])
#custom locations for shenk paths
self._pather.adapt_path((Location.A5_SHENK_START, Location.A5_SHENK_SAFE_DIST),[141, 142, 143, 144, 145])
self._pather.adapt_path((Location.A5_SHENK_SAFE_DIST, Location.A5_SHENK_END), [149])
#custom locations for trav paths
#self._pather.adapt_path((Location.A3_TRAV_START, Location.A3_TRAV_CENTER_STAIRS), [220, 221, 222, 223, 224, 225, 226])
self._pather.adapt_path((Location.A3_TRAV_START, Location.A3_TRAV_CENTER_STAIRS), [900])
self._pather.offset_node(910, [50,50])
self._pather.offset_node(906, [20,0])
self._pather.offset_node(226, [-20,0])
self._pather.offset_node(228, [0,20])
self._shenk_dead = 0
self._skeletons_count=0
self._mages_count=0
self._golem_count="none"
self._revive_count=0
def _check_shenk_death(self):
        ''' make sure Shenk is dead by checking for fireballs so we can exit combat sooner '''
roi = [640,0,640,720]
img = grab()
template_match = template_finder.search(
['SHENK_DEATH_1','SHENK_DEATH_2','SHENK_DEATH_3','SHENK_DEATH_4'],
img,
threshold=0.6,
roi=roi,
use_grayscale = False
)
if template_match.valid:
self._shenk_dead=1
Logger.info('\33[31m'+"Shenks Dead, looting..."+'\033[0m')
else:
return True
def _count_revives(self):
roi = [15,14,400,45]
img = grab()
max_rev = 13
template_match = template_finder.search(
['REV_BASE'],
img,
threshold=0.6,
roi=roi
)
if template_match.valid:
self._revive_count=max_rev
else:
self._revive_count=0
return True
for count in range(1,max_rev):
rev_num = "REV_"+str(count)
template_match = template_finder.search(
[rev_num],
img,
threshold=0.66,
roi=roi,
use_grayscale = False
)
if template_match.valid:
self._revive_count=count
def _count_skeletons(self):
roi = [15,14,400,45]
img = grab()
max_skeles = 13
template_match = template_finder.search(
['SKELE_BASE'],
img,
threshold=0.6,
roi=roi
)
if template_match.valid:
self._skeletons_count=max_skeles
else:
self._skeletons_count=0
return True
for count in range(1,max_skeles):
skele_num = "SKELE_"+str(count)
template_match = template_finder.search(
[skele_num],
img,
threshold=0.66,
roi=roi,
use_grayscale = False
)
if template_match.valid:
self._skeletons_count=count
def _count_gol(self):
roi = [15,14,400,45]
img = grab()
template_match = template_finder.search(
['CLAY'],
img,
threshold=0.6,
roi=roi
)
if template_match.valid:
self._golem_count="clay gol"
else:
self._golem_count="none"
return True
def _summon_count(self):
''' see how many summons and which golem are out '''
self._count_skeletons()
self._count_revives()
self._count_gol()
def _summon_stat(self):
''' print counts for summons '''
Logger.info('\33[31m'+"Summon status | "+str(self._skeletons_count)+"skele | "+str(self._revive_count)+" rev | "+self._golem_count+" |"+'\033[0m')
def _revive(self, cast_pos_abs: tuple[float, float], spray: int = 10, cast_count: int=12):
Logger.info('\033[94m'+"raise revive"+'\033[0m')
keyboard.send(Config().char["stand_still"], do_release=False)
for _ in range(cast_count):
if self._skill_hotkeys["raise_revive"]:
keyboard.send(self._skill_hotkeys["raise_revive"])
#Logger.info("revive -> cast")
x = cast_pos_abs[0] + (random.random() * 2*spray - spray)
y = cast_pos_abs[1] + (random.random() * 2*spray - spray)
cast_pos_monitor = convert_abs_to_monitor((x, y))
nx = cast_pos_monitor[0]
ny = cast_pos_monitor[1]
if(nx>1280):
nx=1275
if(ny>720):
ny=715
if(nx<0):
nx=0
if(ny<0):
ny=0
clamp = [nx,ny]
mouse.move(*clamp)
mouse.press(button="right")
wait(0.075, 0.1)
mouse.release(button="right")
keyboard.send(Config().char["stand_still"], do_press=False)
def _raise_skeleton(self, cast_pos_abs: tuple[float, float], spray: int = 10, cast_count: int=16):
Logger.info('\033[94m'+"raise skeleton"+'\033[0m')
keyboard.send(Config().char["stand_still"], do_release=False)
for _ in range(cast_count):
if self._skill_hotkeys["raise_skeleton"]:
keyboard.send(self._skill_hotkeys["raise_skeleton"])
#Logger.info("raise skeleton -> cast")
x = cast_pos_abs[0] + (random.random() * 2*spray - spray)
y = cast_pos_abs[1] + (random.random() * 2*spray - spray)
cast_pos_monitor = convert_abs_to_monitor((x, y))
nx = cast_pos_monitor[0]
ny = cast_pos_monitor[1]
if(nx>1280):
nx=1279
if(ny>720):
ny=719
if(nx<0):
nx=0
if(ny<0):
ny=0
clamp = [nx,ny]
mouse.move(*clamp)
mouse.press(button="right")
wait(0.02, 0.05)
mouse.release(button="right")
keyboard.send(Config().char["stand_still"], do_press=False)
def _raise_mage(self, cast_pos_abs: tuple[float, float], spray: int = 10, cast_count: int=16):
Logger.info('\033[94m'+"raise mage"+'\033[0m')
keyboard.send(Config().char["stand_still"], do_release=False)
for _ in range(cast_count):
if self._skill_hotkeys["raise_mage"]:
keyboard.send(self._skill_hotkeys["raise_mage"])
#Logger.info("raise skeleton -> cast")
x = cast_pos_abs[0] + (random.random() * 2*spray - spray)
y = cast_pos_abs[1] + (random.random() * 2*spray - spray)
cast_pos_monitor = convert_abs_to_monitor((x, y))
nx = cast_pos_monitor[0]
ny = cast_pos_monitor[1]
if(nx>1280):
nx=1279
if(ny>720):
ny=719
if(nx<0):
nx=0
if(ny<0):
ny=0
clamp = [nx,ny]
mouse.move(*clamp)
mouse.press(button="right")
wait(0.02, 0.05)
mouse.release(button="right")
keyboard.send(Config().char["stand_still"], do_press=False)
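    # Possible refactor (sketch, not used by the code above): _revive,
    # _raise_skeleton and _raise_mage repeat the same clamping of the cast
    # position to the game window; a helper like this could centralize it. The
    # 1280x720 bounds are the literals those methods already use.
    def _clamp_to_window(self, pos: tuple[float, float]) -> tuple[float, float]:
        nx = min(max(pos[0], 0), 1279)
        ny = min(max(pos[1], 0), 719)
        return (nx, ny)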
def pre_buff(self):
#only CTA if pre trav
if Config().char["cta_available"]:
self._pre_buff_cta()
if self._shenk_dead==1:
Logger.info("trav buff?")
#self._heart_of_wolverine()
Logger.info("prebuff/cta")
def _heart_of_wolverine(self):
Logger.info('\033[94m'+"buff ~> heart_of_wolverine"+'\033[0m')
keyboard.send(self._skill_hotkeys["heart_of_wolverine"])
wait(0.05, 0.2)
mouse.click(button="right")
wait(self._cast_duration)
def _clay_golem(self):
Logger.info('\033[94m'+"cast ~> clay golem"+'\033[0m')
keyboard.send(self._skill_hotkeys["clay_golem"])
wait(0.05, 0.2)
mouse.click(button="right")
wait(self._cast_duration)
def bone_armor(self):
if self._skill_hotkeys["bone_armor"]:
keyboard.send(self._skill_hotkeys["bone_armor"])
wait(0.04, 0.1)
mouse.click(button="right")
wait(self._cast_duration)
if self._skill_hotkeys["clay_golem"]:
keyboard.send(self._skill_hotkeys["clay_golem"])
wait(0.04, 0.1)
mouse.click(button="right")
wait(self._cast_duration)
def _bone_armor(self):
if self._skill_hotkeys["bone_armor"]:
keyboard.send(self._skill_hotkeys["bone_armor"])
wait(0.04, 0.1)
mouse.click(button="right")
wait(self._cast_duration)
def _left_attack(self, cast_pos_abs: tuple[float, float], spray: int = 10):
keyboard.send(Config().char["stand_still"], do_release=False)
if self._skill_hotkeys["skill_left"]:
keyboard.send(self._skill_hotkeys["skill_left"])
for _ in range(10):
x = cast_pos_abs[0] + (random.random() * 2*spray - spray)
y = cast_pos_abs[1] + (random.random() * 2*spray - spray)
cast_pos_monitor = convert_abs_to_monitor((x, y))
mouse.move(*cast_pos_monitor)
mouse.press(button="left")
wait(0.25, 0.3)
mouse.release(button="left")
keyboard.send(Config().char["stand_still"], do_press=False)
def _left_attack_single(self, cast_pos_abs: tuple[float, float], spray: int = 10, cast_count: int=6):
keyboard.send(Config().char["stand_still"], do_release=False)
if self._skill_hotkeys["skill_left"]:
keyboard.send(self._skill_hotkeys["skill_left"])
for _ in range(cast_count):
x = cast_pos_abs[0] + (random.random() * 2*spray - spray)
y = cast_pos_abs[1] + (random.random() * 2*spray - spray)
cast_pos_monitor = convert_abs_to_monitor((x, y))
mouse.move(*cast_pos_monitor)
mouse.press(button="left")
wait(0.25, 0.3)
mouse.release(button="left")
keyboard.send(Config().char["stand_still"], do_press=False)
def _amp_dmg(self, cast_pos_abs: tuple[float, float], spray: float = 10):
if self._skill_hotkeys["amp_dmg"]:
keyboard.send(self._skill_hotkeys["amp_dmg"])
x = cast_pos_abs[0] + (random.random() * 2*spray - spray)
y = cast_pos_abs[1] + (random.random() * 2*spray - spray)
cast_pos_monitor = convert_abs_to_monitor((x, y))
mouse.move(*cast_pos_monitor)
mouse.press(button="right")
wait(0.25, 0.35)
mouse.release(button="right")
def _corpse_explosion(self, cast_pos_abs: tuple[float, float], spray: int = 10,cast_count: int = 8):
keyboard.send(Config().char["stand_still"], do_release=False)
Logger.info('\033[93m'+"corpse explosion~> random cast"+'\033[0m')
for _ in range(cast_count):
if self._skill_hotkeys["corpse_explosion"]:
keyboard.send(self._skill_hotkeys["corpse_explosion"])
x = cast_pos_abs[0] + (random.random() * 2*spray - spray)
y = cast_pos_abs[1] + (random.random() * 2*spray - spray)
cast_pos_monitor = convert_abs_to_monitor((x, y))
mouse.move(*cast_pos_monitor)
mouse.press(button="right")
wait(0.075, 0.1)
mouse.release(button="right")
keyboard.send(Config().char["stand_still"], do_press=False)
def _cast_circle(self, cast_dir: tuple[float,float],cast_start_angle: float=0.0, cast_end_angle: float=90.0,cast_div: int = 10,cast_v_div: int=4,cast_spell: str='raise_skeleton',delay: float=1.0,offset: float=1.0):
Logger.info('\033[93m'+"circle cast ~>"+cast_spell+'\033[0m')
keyboard.send(Config().char["stand_still"], do_release=False)
keyboard.send(self._skill_hotkeys[cast_spell])
mouse.press(button="right")
for i in range(cast_div):
angle = self._lerp(cast_start_angle,cast_end_angle,float(i)/cast_div)
target = unit_vector(rotate_vec(cast_dir, angle))
#Logger.info("current angle ~> "+str(angle))
for j in range(cast_v_div):
circle_pos_screen = self._pather._adjust_abs_range_to_screen((target*120.0*float(j+1.0))*offset)
circle_pos_monitor = convert_abs_to_monitor(circle_pos_screen)
mouse.move(*circle_pos_monitor,delay_factor=[0.3*delay, .6*delay])
#Logger.info("circle move")
mouse.release(button="right")
keyboard.send(Config().char["stand_still"], do_press=False)
def kill_pindle(self) -> bool:
atk_len = max(2, int(Config().char["atk_len_pindle"] / 2))
pindle_pos_abs = convert_screen_to_abs(Config().path["pindle_end"][0])
cast_pos_abs = [pindle_pos_abs[0] * 0.9, pindle_pos_abs[1] * 0.9]
pc = [pindle_pos_abs[0] * 0.9, (pindle_pos_abs[1]-50) * 0.9]
raise_skel_pos = [0,10]
rot_deg=0
self._cast_circle(cast_dir=[-1,1],cast_start_angle=0,cast_end_angle=360,cast_div=32,cast_v_div=2,cast_spell='raise_skeleton',offset=2,delay=1.6)
wait(self._cast_duration, self._cast_duration +.2)
self._cast_circle(cast_dir=[-1,1],cast_start_angle=0,cast_end_angle=360,cast_div=4,cast_v_div=3,cast_spell='amp_dmg',delay=3.0)
rot_deg=0
rot_deg=-180
#enable this if your merc is dying
pindle_pack_kill = bool(int(self._skill_hotkeys["clear_pindle_pack"]))
if(pindle_pack_kill):
Logger.info('\033[93m'+"optional pindle pack"+'\033[0m')
self._cast_circle(cast_dir=[-1,1],cast_start_angle=0,cast_end_angle=360,cast_div=12,cast_v_div=2,cast_spell='corpse_explosion',delay=3.0,offset=1.8)
wait(self._cast_duration, self._cast_duration +.2)
self._cast_circle(cast_dir=[-1,1],cast_start_angle=0,cast_end_angle=360,cast_div=12,cast_v_div=2,cast_spell='corpse_explosion',delay=3.0,offset=1.8)
wait(self._cast_duration, self._cast_duration +.1)
self._cast_circle(cast_dir=[-1,1],cast_start_angle=0,cast_end_angle=360,cast_div=12,cast_v_div=2,cast_spell='raise_revive',delay=1.2,offset=1.8)
#move to pindle combat position
self._pather.traverse_nodes([102,103], self)
wait(self._cast_duration, self._cast_duration +.2)
# wiggle to unstick merc....
pos_m = convert_abs_to_monitor((0, -150))
self.pre_move()
self.move(pos_m, force_move=True)
wait(self._cast_duration, self._cast_duration +.1)
self._bone_armor()
# wiggle to unstick merc....
pos_m = convert_abs_to_monitor((0, 150))
self.pre_move()
self.move(pos_m, force_move=True)
wait(self._cast_duration, self._cast_duration +.1)
self._amp_dmg(cast_pos_abs, 11)
wait(self._cast_duration, self._cast_duration +.2)
self._clay_golem()
self._summon_count()
for _ in range(atk_len):
Logger.info('\033[96m'+ "pindle atk cycle" + '\033[0m')
self._amp_dmg(cast_pos_abs, 11)
self._left_attack_single(cast_pos_abs, 11, cast_count=8)
rot_deg=0
for _ in range(2):
corpse_pos = unit_vector(rotate_vec(cast_pos_abs, rot_deg)) * 200
self._corpse_explosion(pc,40,cast_count=2)
rot_deg-=7
rot_deg=0
for _ in range(2):
corpse_pos = unit_vector(rotate_vec(cast_pos_abs, rot_deg)) * 200
self._corpse_explosion(pc,40,cast_count=2)
rot_deg+=7
# wiggle to unstick merc
pos_m = convert_abs_to_monitor((0, -150))
self.pre_move()
self.move(pos_m, force_move=True)
wait(self._cast_duration, self._cast_duration +.1)
pos_m = convert_abs_to_monitor((0, 150))
self.pre_move()
self.move(pos_m, force_move=True)
wait(self._cast_duration, self._cast_duration +.1)
self._summon_count()
self._revive(cast_pos_abs,50,cast_count=4)
self._summon_stat()
Logger.info('\033[92m'+"atk cycle end"+'\033[0m')
#wait for pindle to die just incase - maybe needs death detection
wait(self._cast_duration, self._cast_duration + 0.4)
if self.capabilities.can_teleport_natively:
self._pather.traverse_nodes("pindle_end", self)
else:
self._pather.traverse_nodes((Location.A5_PINDLE_SAFE_DIST, Location.A5_PINDLE_END), self, force_tp=True)
return True
def kill_eldritch(self) -> bool:
self._summon_stat()
atk_len = max(2, int(Config().char["atk_len_eldritch"] / 2))
eld_pos_abs = convert_screen_to_abs(Config().path["eldritch_end"][0])
cast_pos_abs = [eld_pos_abs[0] * 0.9, eld_pos_abs[1] * 0.9]
self.bone_armor()
# move a bit back
pos_m = convert_abs_to_monitor((0, 50))
self.pre_move()
self.move(pos_m, force_move=True)
self._amp_dmg(cast_pos_abs, 11)
corpse_exp_pos = [0,-80]
for _ in range(atk_len):
#Logger.info("atk cycle")
Logger.info('\033[96m'+ "eldrich atk cycle" + '\033[0m')
self._left_attack_single(cast_pos_abs, 11, cast_count=8)
self._corpse_explosion(cast_pos_abs, 60, cast_count=4)
Logger.info('\033[92m'+"atk cycle end"+'\033[0m')
# Move to items
wait(self._cast_duration, self._cast_duration + 0.2)
if self.capabilities.can_teleport_natively:
self._pather.traverse_nodes_fixed("eldritch_end", self)
else:
self._pather.traverse_nodes((Location.A5_ELDRITCH_SAFE_DIST, Location.A5_ELDRITCH_END), self, timeout=0.6, force_tp=True)
self.bone_armor()
#get some more summons out for elite packs
self._cast_circle(cast_dir=[-1,1],cast_start_angle=0,cast_end_angle=360,cast_div=12,cast_v_div=4,cast_spell='raise_revive',delay=1.2,offset=.8)
#self._summon_count()
#self._raise_skeleton([0,-40],80,cast_count=4)
#self._raise_mage(cast_pos_abs,80,cast_count=10)
for _ in range(10):
self._summon_count()
if self._skeletons_count < 10:
self._raise_skeleton([0,10],180,cast_count=2)
self._raise_skeleton([0,-10],180,cast_count=2)
#if self._mages_count < 5:
# self._raise_mage([0,-40],80,cast_count=2)
# self._raise_mage(cast_pos_abs,90,cast_count=2)
if self._revive_count < 10:
self._revive([0,10],180,cast_count=2)
self._revive([0,-10],180,cast_count=2)
self._summon_stat()
# move a bit back
pos_m = convert_abs_to_monitor((0, -350))
self.pre_move()
self.move(pos_m, force_move=True)
return True
def kill_shenk(self) -> bool:
#stop to kill potentially troublesome packs
Logger.info('\033[93m'+"dealing with posible packs"+'\033[0m')
self.bone_armor()
self._cast_circle(cast_dir=[-1,1],cast_start_angle=0,cast_end_angle=360,cast_div=4,cast_v_div=3,cast_spell='amp_dmg',delay=3.0)
self._corpse_explosion([0,50], 80, cast_count=8)
self._summon_stat()
#continue to shenk fight
self._pather.traverse_nodes(([ 146, 147, 148]), self, timeout=1.4, force_tp=True)
shenk_pos_abs = self._pather.find_abs_node_pos(149, grab())
if shenk_pos_abs is None:
shenk_pos_abs = convert_screen_to_abs(Config().path["shenk_end"][0])
cast_pos_abs = [shenk_pos_abs[0] * 0.9, shenk_pos_abs[1] * 0.9]
self.bone_armor()
self._cast_circle(cast_dir=[-1,1],cast_start_angle=0,cast_end_angle=360,cast_div=4,cast_v_div=3,cast_spell='amp_dmg',delay=3.0)
corpse_exp_pos = [200,80]
for _ in range(int(Config().char["atk_len_shenk"])):
Logger.info('\033[96m'+ "shenk atk cycle" + '\033[0m')
self._check_shenk_death()
if(self._shenk_dead):
break
self._left_attack_single(cast_pos_abs, 11, cast_count=4)
self._bone_armor()
self._amp_dmg(cast_pos_abs, 11)
self._corpse_explosion(corpse_exp_pos, 80, cast_count=12)
self._summon_count()
if self._skeletons_count < 10:
self._raise_skeleton(cast_pos_abs,160,cast_count=6)
#if self._mages_count < 5:
# self._raise_mage(cast_pos_abs,160,cast_count=6)
if self._revive_count < 10:
self._revive(cast_pos_abs,160,cast_count=2)
self._check_shenk_death()
if(self._shenk_dead):
break
Logger.info('\033[92m'+"atk cycle end"+'\033[0m')
# Move to items
#wait(self._cast_duration, self._cast_duration + 0.2)
self._pather.traverse_nodes(([148,149]), self, timeout=3.4, force_tp=True)
for _ in range(30):
self._summon_count()
if self._skeletons_count < 10:
self._raise_skeleton([-50,-90],160,cast_count=2)
#if self._mages_count < 5:
# self._raise_mage([-50,-90],160,cast_count=2)
if self._revive_count < 10:
self._revive([-50,-90],160,cast_count=2)
self._shenk_dead = 1
self._summon_stat()
return True
def stairs_S(self):
roi = [0,0,1280,720]
img = grab()
template_match = template_finder.search(
["TRAV_S","TRAV_S_1"],
img,
threshold=0.4,
roi=roi
)
if template_match.valid:
pos = template_match.center_monitor
pos = (pos[0], pos[1] )
Logger.debug("mid point >> "+str(pos))
for i in range(8):
mouse.move(*pos, randomize=6, delay_factor=[0.9, 1.1])
wait(0.08, 0.15)
mouse.click(button="left")
Logger.debug("got to S")
return True
else:
Logger.debug("cant find path!")
def stairs_F(self):
roi = [0,0,1280,720]
img = grab()
template_match = template_finder.search(
["TRAV_F"],
img,
threshold=0.4,
roi=roi
)
if template_match.valid:
pos = template_match.center_monitor
pos = (pos[0], pos[1] )
Logger.debug("mid point >> "+str(pos))
for i in range(8):
mouse.move(*pos, randomize=6, delay_factor=[0.9, 1.1])
wait(0.08, 0.15)
mouse.click(button="left")
Logger.debug("got to F")
return True
else:
Logger.debug("cant find path!")
def stairs_W(self):
roi = [0,0,1280,720]
img = grab()
template_match = template_finder.search(
["TRAV_W","TRAV_W_1"],
img,
threshold=0.4,
roi=roi
)
if template_match.valid:
pos = template_match.center_monitor
pos = (pos[0], pos[1] )
Logger.debug("mid point >> "+str(pos))
for i in range(8):
mouse.move(*pos, randomize=6, delay_factor=[0.9, 1.1])
wait(0.08, 0.15)
mouse.click(button="left")
Logger.debug("got to W")
return True
else:
Logger.debug("cant find path!")
def to_durance(self):
'''enter the durance of hate to gather summons'''
roi = [0,0,1280,720]
img = grab()
template_match = template_finder.search(
["TRAV_18"],
img,
threshold=0.3,
roi=roi
)
if template_match.valid:
pos = template_match.center_monitor
pos = (pos[0], pos[1] )
Logger.debug("DURANCE ENTRANCE >> "+str(pos))
# Note: Template is top of portal, thus move the y-position a bit to the bottom
for i in range(2):
mouse.move(*pos, randomize=6, delay_factor=[0.9, 1.1])
wait(0.08, 0.15)
mouse.click(button="left")
Logger.debug("enter durance lv 1")
return wait_until_visible(ScreenObjects.Loading, 2).valid
else:
Logger.debug("cant find durance!, trying fixed loc")
return False
def to_trav(self):
'''exit the durance of hate back to trav'''
Logger.debug("leaving the durance...")
roi = [0,0,1280,720]
while True:
spray = 200
target =[0,0]
x = target[0] + (random.random() * 2*spray - spray)
y = target[1] + (random.random() * 2*spray - spray)
target = convert_abs_to_monitor((x, y))
mouse.move(*target, randomize=6, delay_factor=[0.9, 1.1])
img = grab()
template_match = template_finder.search(
["TO_TRAV_0"],
img,
threshold=0.95,
roi=roi
)
if template_match.valid:
pos = template_match.center_monitor
pos = (pos[0], pos[1] )
Logger.debug("DURANCE EXIT >> "+str(pos))
# Note: Template is top of portal, thus move the y-position a bit to the bottom
for i in range(2):
mouse.move(*pos, randomize=6, delay_factor=[0.9, 1.1])
wait(0.08, 0.15)
mouse.click(button="left")
Logger.debug("entering trav...")
if wait_until_visible(ScreenObjects.Loading, 2).valid:
return True
return False
def kill_council(self) -> bool:
''' kill the council '''
result = self._pather.traverse_nodes((901,902,903,904,905,906,226,228,300), self, force_move=True,timeout = 2.5,threshold=.55)
#this is gross but the skeletons I think cause pathing issues
while result is False:
#struggle to find a way, prob a skeleton confusing the way
pos_m = convert_abs_to_monitor((20, 20))
self.pre_move()
self.move(pos_m, force_move=True)
wait(self._cast_duration, self._cast_duration +.1)
pos_m = convert_abs_to_monitor((-20, -20))
self.pre_move()
self.move(pos_m, force_move=True)
wait(self._cast_duration, self._cast_duration +.1)
if self._pather.find_abs_node_pos(904, grab()):
#try again
result = self._pather.traverse_nodes((904,905,906,226,228,300), self, force_move=True,timeout = 2.5)
elif self._pather.find_abs_node_pos(905, grab()):
#try again
result = self._pather.traverse_nodes((905,906,226,228,300), self, force_move=True,timeout = 2.5)
elif self._pather.find_abs_node_pos(906, grab()):
#try again
result = self._pather.traverse_nodes((906,226,228,300), self, force_move=True,timeout = 2.5)
elif self._pather.find_abs_node_pos(226, grab()):
#try again
result = self._pather.traverse_nodes((226,228,300), self, force_move=True,timeout = 2.5)
elif self._pather.find_abs_node_pos(901, grab()):
#try again
result = self._pather.traverse_nodes((901,902,903,904,905,906,226,228,300), self, force_move=True,timeout = 2.5)
elif self._pather.find_abs_node_pos(902, grab()):
#try again
result = self._pather.traverse_nodes((902,903,904,905,906,226,228,300), self, force_move=True,timeout = 2.5)
self._cast_circle(cast_dir=[-1,1],cast_start_angle=0,cast_end_angle=360,cast_div=4,cast_v_div=3,cast_spell='amp_dmg',delay=3.0)
enter = False
while enter is False:
enter = self.to_durance()
wait(.25)
exit = self.to_trav()
self._cast_circle(cast_dir=[-1,1],cast_start_angle=0,cast_end_angle=360,cast_div=4,cast_v_div=3,cast_spell='amp_dmg',delay=3.0)
#enter = False
#while enter is False:
# enter = self.to_durance()
#wait(.25)
#exit = self.to_trav()
self._bone_armor()
wait(self._cast_duration, self._cast_duration +.2)
self._clay_golem()
wait(self._cast_duration, self._cast_duration +.2)
self._pather.traverse_nodes([911], self)
# wiggle to unstick merc....
pos_m = convert_abs_to_monitor((-20, -150))
self.pre_move()
self.move(pos_m, force_move=True)
wait(self._cast_duration, self._cast_duration +.2)
# move up a bit
pos_m = convert_abs_to_monitor((-20, -40))
self.pre_move()
self.move(pos_m, force_move=True)
wait(self._cast_duration, self._cast_duration +.1)
atk_pos_abs = self._pather.find_abs_node_pos(229, grab())
if atk_pos_abs is None:
Logger.debug("Could not find node [229]. Using static attack coordinates instead.")
atk_pos_abs = [-300, -40]
else:
atk_pos_abs = [atk_pos_abs[0], atk_pos_abs[1] + 70]
        cast_pos_abs = np.array([atk_pos_abs[0] * 1.0, atk_pos_abs[1] * 1.0])
import itertools
import warnings
import numpy as np
from scipy import spatial, special
from scipy.optimize import OptimizeResult
from rt_opt.global_search import run_and_tumble
from rt_opt.local_search import bfgs_b
def _prepare_bounds(bounds, n_dims):
"""
Check size and validity of a rectangular bounds object, and turn it into the required format for
the following calculations.
:param bounds: [array-like object] Rectangular bounds object
:param n_dims: [int] Dimensionality of the problem
:return: (bound_lower [np.array], bound_upper [np.array])
"""
if bounds is not None:
bounds = np.array(bounds)
if n_dims is not None:
assert bounds.shape == (n_dims, 2), ('bounds has wrong shape. Expected shape: ' +
'(n_dims, 2), where n_dims is the ' +
'dimensionality of the problem.')
bound_lower = bounds[:, 0]
bound_upper = bounds[:, 1]
assert (bound_upper > bound_lower).all(), ('Upper bound must always be larger than lower ' +
'bound.')
return bound_lower, bound_upper
else:
assert n_dims is not None, 'If bounds is None, n_dims must be provided.'
return np.repeat(-np.inf, n_dims), np.repeat(np.inf, n_dims)
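# Illustrative usage sketch (added for clarity, not part of the original module).
# The concrete bounds below are assumptions chosen for demonstration only.
def _demo_prepare_bounds():
    # A rectangular 2-D box [0, 1] x [-1, 1] is split into lower/upper arrays.
    lower, upper = _prepare_bounds([[0.0, 1.0], [-1.0, 1.0]], n_dims=2)
    assert np.allclose(lower, [0.0, -1.0]) and np.allclose(upper, [1.0, 1.0])
    # Without bounds, the search space is unbounded in every dimension.
    lower, upper = _prepare_bounds(None, n_dims=3)
    assert np.all(np.isinf(lower)) and np.all(np.isinf(upper))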
def _prepare_x0(x0, n_bacteria_per_dim, max_dims, n_reduced_dims_eff):
"""
Check and prepare initial conditions object x0. If x0 is a vector, that is, if it has the shape
(n_dims,) it is duplicated times the total number of bacteria, which is given by
i) n_bacteria = n_bacteria_per_dim ** n_dims if n_dims <= max_dims or
ii) n_bacteria = n_bacteria_per_dim ** n_reduced_dims_eff if n_dims > max_dims.
:param x0: [array-like object] Initial conditions object. Must have the shape
(n_bacteria, n_dims) or (n_dims,)
:param n_bacteria_per_dim: [int] Number of bacteria for each dimension
:param max_dims: [int] Maximum dimension of problems to be solved without using Sequential
Random Embeddings
:param n_reduced_dims_eff: [int] Number of effective reduced dimensions used by the Sequential
Random Embeddings algorithm
:return: Initial conditions for all bacteria [np.array of shape (n_bacteria, n_dims)]
"""
x0 = np.array(x0)
if len(x0.shape) == 1:
n_dims = x0.shape[0]
n_bacteria = (n_bacteria_per_dim ** n_dims if n_dims <= max_dims else
n_bacteria_per_dim ** n_reduced_dims_eff)
x0_population = np.tile(x0, (n_bacteria, 1))
elif len(x0.shape) == 2:
n_dims = x0.shape[1]
n_bacteria = x0.shape[0]
n_bacteria_target = (n_bacteria_per_dim ** n_dims if n_dims <= max_dims else
n_bacteria_per_dim ** n_reduced_dims_eff)
if n_bacteria != n_bacteria_target:
warnings.warn('The number of bacteria given by x0 does not match the number of ' +
'bacteria given by the relation ' +
'n_bacteria = n_bacteria_per_dim ** n_dims if n_dims <= max_dims else ' +
'n_bacteria_per_dim ** (n_reduced_dims + 1). The latter implies that ' +
f'n_bacteria = {n_bacteria_target}, whereas the former implies ' +
f'that n_bacteria = {n_bacteria}. Using n_bacteria = {n_bacteria}.')
x0_population = x0.copy()
else:
raise ValueError('x0 must be an array of either the shape (n_bacteria, n_dims) or ' +
'(n_dims,).')
return x0_population
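# Illustrative usage sketch (added for clarity, not part of the original module).
# With n_bacteria_per_dim=3 and a 2-D starting point, 3 ** 2 = 9 bacteria are
# spawned at the same location; all numbers are assumptions for demonstration only.
def _demo_prepare_x0():
    x0_population = _prepare_x0([0.5, -0.5], n_bacteria_per_dim=3, max_dims=5,
                                n_reduced_dims_eff=3)
    assert x0_population.shape == (9, 2)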
def _pad_trace(trace, targetLength):
"""
Pad single-bacteria trace to given length.
:param trace: [np.array] Single-bacteria trace
:param targetLength: [int] Desired length
:return: Padded trace [np.array]
"""
currentLength = trace.shape[0]
paddingLength = (targetLength - currentLength)
return np.pad(trace, [(0, paddingLength), (0, 0)], mode="edge")
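# Illustrative usage sketch (added for clarity, not part of the original module):
# a trace of three 2-D points is padded to length 5 by repeating its last row.
def _demo_pad_trace():
    trace = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 4.0]])
    padded = _pad_trace(trace, targetLength=5)
    assert padded.shape == (5, 2)
    assert np.allclose(padded[-1], [2.0, 4.0])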
def _sequential_random_embeddings(f, x0, bounds, n_reduced_dims_eff=3, n_embeddings=10,
verbosity=1, **optimizer_kwargs):
"""
Implementation of the Sequential Random Embeddings algorithm described in
+++++
<NAME>, <NAME>, and <NAME>, Derivative-Free Optimization of High-Dimensional Non-Convex
Functions by Sequential Random Embeddings, Proceedings of the Twenty-Fifth International Joint
Conference on Artificial Intelligence, AAAI Press (2016).
+++++
The idea is basically to reduce high-dimensional problems to low-dimensional ones by embedding
the original, high-dimensional search space ℝ^h into a low dimensional one, ℝ^l, by
sequentially applying the random linear transformation
x(n+1) = α(n+1)x(n) + A•y(n+1), x ∈ ℝ^h, y ∈ ℝ^l, A ∈ N(0, 1)^(h×l), α ∈ ℝ
and minimizing the objective function f(αx + A•y) w.r.t. (α, y).
:param f: [callable] Objective function. Must accept its argument x as numpy array
:param x0: [np.array] Initial values for the bacteria population in the original,
high-dimensional space ℝ^h
:param bounds: [callable] Bounds projection, see description of parameter
``projection_callback`` in :func:`local_search.bfgs_b`
:param n_reduced_dims_eff: [int] Effective dimension of the embedded problem, ℝ^(l+1)
:param n_embeddings: [int] Number of embedding iterations
:param verbosity: [int] Output verbosity. Must be 0, 1, or 2
:param optimizer_kwargs: [dict] Keyword arguments to pass to the actual optimization routine
:return: Best minimum of f found [scipy.optimize.OptimizeResult]
"""
assert verbosity in [0, 1, 2], 'verbosity must be 0, 1, or 2.'
orig_dim = x0.shape[1]
x = np.zeros(orig_dim)
x_best = x.copy()
f_best = np.inf
nfev = nit = 0
success_best = False
for i in range(n_embeddings):
A = np.random.normal(size=(orig_dim, n_reduced_dims_eff - 1))
# Normalize rows of A
normalization_sum = A.sum(axis=1)
normalization_sum = np.where(normalization_sum == 0, 1, normalization_sum)
A = A / normalization_sum[:, np.newaxis]
def f_embedded(arg): return f(bounds(arg[0] * x + A.dot(arg[1:]))[0])
# Set up bounds callback
def bounds_embedded(arg):
bounds_hit = np.zeros(len(arg), dtype=bool)
x_proj, bounds_hit_orig = bounds(arg[0] * x + A.dot(arg[1:]))
if bounds_hit_orig.any(): # Boundary hit in original, non-embedded variable
arg[1:] = np.linalg.lstsq(A, x_proj - arg[0] * x, rcond=None)[0]
bounds_hit[1:] = (A[bounds_hit_orig] != 0).any(axis=0)
return arg, bounds_hit
# Set up y0
y0 =
|
np.zeros((x0.shape[0], n_reduced_dims_eff))
|
numpy.zeros
|
import os, sys
from refinery import app,db
from pubsub import msgServer
from refinery.data.models import Dataset, Experiment, TopicModelEx, DataDoc, Folder
from flask import g, request, render_template, Response, jsonify
bnpydir = "bnpy/bnpy-dev"
sys.path.append(bnpydir)
import bnpy
from bnpy.data import WordsData
from customFunc import getModelState
from refinery import celery
import json
import numpy as np
import scipy as sp
from math import log
vocab = {}
def pubsub_name(d,e):
return 'analysis_' + str(d) + "_" + str(e)
def set_tm_status(username,fID,ex,st):
'''set the topic model status and publish it to the menus pubsub'''
ex.status = st
msgServer.publish(username + "Xmenus","tmstatus," + str(fID) + "," + st)
@app.route('/<username>/viz_tm/<int:folder_id>/')
def topic_model_viz(username=None, folder_id=None):
'''serve the topic model visualization'''
f = Folder.query.get(folder_id)
ex = Experiment.query.get(f.tm_id)
ei = ex.getExInfo()
vocabfile = f.vocab_path()
vocab = [x.strip() for x in open(vocabfile,'r')]
totalDox = f.N()
return render_template("topicmodel.html", d=f, ex=ex, vocab=vocab, totalDox=totalDox, stopwords=" ".join(ei.stopwords))
@app.route('/<username>/data/<int:data_id>/<int:ex_id>/load_analysis_tm')
def load_analysis_tm(username=None, data_id=None, ex_id=None):
'''
load the topic modeling analysis
this is called immediately by the topicmodeling viz page
'''
[topW,topic_probs,doc_tops,lms] = Experiment.query.get(ex_id).getExInfo().viz_data
return jsonify(topW=topW,topic_probs=[x for x in topic_probs],doc_tops=doc_tops)
@app.route('/<username>/change_stopwords/<int:folder_id>', methods=['POST'])
def change_stopwords(username=None,folder_id=None):
stopwords = request.form['stopwords'].strip().split()
f = Folder.query.get(folder_id)
ei = Experiment.query.get(f.tm_id).getExInfo()
ei.stopwords = stopwords
db.session.commit()
return Response("200")
#TODO Duplicates code in summarize.py
@app.route('/<username>/get_doc_text', methods=['POST'])
def get_doc_text(username=None):
filename = 'refinery/static/users/' + username + "/documents/" + request.form['filename']
lines = [l.strip() for l in open(filename)]
doctext = "\n".join([l for l in lines if len(l) > 0])
return render_template("docview.html",doctext=doctext.decode('utf-8'))
@app.route('/<username>/set_num_topics/<int:folder_id>', methods=['POST'])
def set_num_topics(username=None, folder_id=None):
'''set the number of topics for a folder'''
v = request.form['v']
f = Folder.query.get(folder_id)
ex = Experiment.query.get(f.tm_id)
tm = ex.getExInfo()
tm.nTopics = int(v)
set_tm_status(username,folder_id, ex, 'idle')
db.session.commit()
return Response(status='200')
@app.route('/<username>/data/<int:data_id>/<int:ex_id>/start_analysis_tm')
def run_analysis_tm(username=None, data_id=None, ex_id=None):
'''Run topic modeling learning - passes it off to celery and returns'''
msgServer.publish(pubsub_name(data_id,ex_id), "%s" % ('Starting analysis'))
run_topic_modeling.apply_async([username,data_id,ex_id])
return Response(status='200')
# Run the analysis
@app.route('/<username>/make_subset/<int:folder_id>/<int:ex_id>',methods=['POST'])
def make_subset(username=None, folder_id=None, ex_id=None):
dOld = Folder.query.get(folder_id)
[topW,topic_probs,doc_tops,lms] = Experiment.query.get(ex_id).getExInfo().viz_data
nTops = len(doc_tops[0])
nDox = int(request.form['nDox'])
searchwords = request.form['searchwords'].split(" ")
topicblocks = [int(x) for x in request.form.getlist('blocks[]')]
#avg each word's renormalized p(w|z)
blockDist = [0.0 for _ in xrange(nTops)]
tot = 0.0
for v in topicblocks:
if(v >= 0):
blockDist[v] += 1.0
tot += 1.0
if(tot > 0):
blockDist = [x/tot for x in blockDist]
else:
blockDist = [0.0 for x in blockDist]
'''
get target KL distribution from search terms
sort by kl - below
'''
vocabfile = dOld.vocab_path()
vocab = {}
idx = 0
vv = [x.strip() for x in open(vocabfile,'r')]
for v in vv:
vocab[v] = idx
idx += 1
searchSmooth = .0000001
searchDist = [searchSmooth for _ in xrange(nTops)]
sdTotal = searchSmooth * nTops
for sw in searchwords:
if sw in vocab:
idx = vocab[sw]
for tidx in xrange(nTops):
for i,p in topW[tidx]:
if i == idx:
searchDist[tidx] += p
sdTotal += p
searchDist = [x/sdTotal for x in searchDist]
if len(request.form['searchwords'].strip()) > 0:
targDist = [(a + b)/2.0 for a,b in zip(searchDist,blockDist)] # needs the search component
else:
targDist = blockDist
targTotal = sum(targDist)
if targTotal == 0:
targDist = [1.0/float(nTops) for x in xrange(nTops)]
else:
targDist = [x/targTotal for x in targDist]
L = np.array(lms,dtype=np.float16)
searchLM = np.log(np.transpose(np.array(targDist,dtype=np.float16).dot(L)))
D = sp.sparse.dok_matrix((dOld.N(),dOld.vocabSize),dtype=np.float16)
wordcounts = open(dOld.wordcount_path(),'r')
totalW = np.zeros((dOld.N()))
for line in wordcounts:
[d,w,c] = [int(x) for x in line.strip().split(",")]
D[d,w] = c
totalW[d] += c
wordcounts.close()
D = D.asformat("csr")
lmScores = D.dot(searchLM)
lmScores = np.divide(lmScores,totalW)
lmScores = np.divide(np.power(2.0,np.multiply(lmScores,-1.0)),totalW)
inds =
|
np.argsort(lmScores)
|
numpy.argsort
|
import numpy as np
import pandas as pd
from pandas import Series
import os
import sys
from sklearn.metrics import confusion_matrix
def standard_confusion_matrix(y_true, y_pred):
'''
Reformat confusion matrix output from sklearn for plotting profit curve.
'''
[[tn, fp], [fn, tp]] = confusion_matrix(y_true, y_pred, labels=[0,1])
return np.array([[tp, fp], [fn, tn]])
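# Illustrative usage sketch (added for clarity, not part of the original module):
# it shows the reordering from sklearn's [[tn, fp], [fn, tp]] layout to the
# [[tp, fp], [fn, tn]] layout expected by the profit-curve code.
def _demo_standard_confusion_matrix():
    y_true = [1, 1, 0, 0, 1]
    y_pred = [1, 0, 0, 1, 1]
    # tp = 2, fp = 1, fn = 1, tn = 1
    assert (standard_confusion_matrix(y_true, y_pred) ==
            np.array([[2, 1], [1, 1]])).all()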
def standard_confusion_matrix_for_top_ranked_percent(y_test_list, y_probability_list, threshold, percent_ranked_instances ):
'''
This function is meant to test models that would be used to make N predictions (limited) rather than the whole test set;
- this is useful when we want to see how many FPs might exist in the first N ranked predictions
- mainly considered a helper function when evaluating how a model might be used in practice
'''
y_probability = np.asarray(y_probability_list)
y_test_labels =
|
np.array(y_test_list)
|
numpy.array
|
#! /usr/bin/env python
"""
The idea is based on <NAME>'s Keplerspline.pro
IDL module, which iteratively fits 4th-order B-splines
to the data to remove trends.
Breakpoints for each campaign come from A.V.
The `knots` utility program defines the inner knots
and is based on <NAME>'s bspline_bkpts.pro,
modified and simplified for SciPy.
"""
import numpy as np
from astropy.io import fits
from scipy.interpolate import LSQUnivariateSpline,UnivariateSpline,interp1d
from scipy.special import erf,erfc,betainc,binom
from scipy.signal import medfilt
# check if fitsio is installed
try:
import fitsio
nofitsio = False
except ImportError:
nofitsio = True
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
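# Illustrative usage sketch (added for clarity, not part of the original script):
# the strided windows can be reduced along the last axis, e.g. a running median
# over a window of 3 samples. The sample values are made up for demonstration.
def _demo_rolling_window():
    a = np.array([1.0, 5.0, 2.0, 8.0, 3.0])
    windows = rolling_window(a, 3)          # shape (3, 3), a view without copying
    running_median = np.median(windows, axis=-1)
    assert np.allclose(running_median, [2.0, 5.0, 3.0])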
#here some changes
def get_knots(x, dt = None, npts = None, k=4,verbose=False):
"""
determines the inner knots for a spline
that satisfy the Schoenberg-Whitney conditions
"""
# if there is an empty list, return it and fail
n = len(x)
if n<1:
return x, (True,)
# Get the range in x
x = np.array(x)
x.sort() # sort x from low to high
x_range = x[-1] - x[0]
##########################################################
## Get evenly spaced knots #
## knots must be internal to the #
## abscissa. We first generate                           #
## a list evenly spaced on [min(x) + dt/2,max(x) - dt/2) #
## OLD #t = np.arange(x[0]+ dt/2.,x[-1]-dt/2.,dt)
##########################################################
# if dt is given, use it
if dt is not None:
npts = int(x_range / dt) + 1
if npts < 2: npts = 2
tempdt = x_range/(npts - 1.)
t = np.arange(npts,dtype=float) * tempdt + x[0]
# if dt not given & npts is, divide
elif npts is not None:
npts = int(npts)
tempdt = x_range/(npts - 1.)
t = np.arange(npts,dtype=float) * tempdt + x[0]
else:
npts = 11
tempdt = x_range/(npts - 1.)
print(('Defaulting to %i knots. dt = %0.2f'%(npts,tempdt)))
t = np.arange(npts,dtype=float) * tempdt + x[0]
if np.nanmin(x) < np.min(t):
t[np.argmin(t)] = np.nanmin(x)
if np.nanmax(x) > np.max(t):
t[np.argmax(t)] = np.nanmax(x)
t = t[(t>np.min(x)) & (t<
|
np.max(x)
|
numpy.max
|
from dataclasses import dataclass, replace
from functools import cached_property
from typing import Tuple, Type, Union, Optional, Dict, Callable, List
from collections import namedtuple
from itertools import dropwhile
from warnings import warn
import numpy as np
from numpy import ndarray
from scipy.spatial import cKDTree
from ..element import (Element, ElementHex1, ElementQuad1, ElementQuad2,
ElementTetP1, ElementTriP1, ElementTriP2, ElementLineP1,
ElementTetP2, ElementHex2, BOUNDARY_ELEMENT_MAP)
@dataclass(repr=False)
class Mesh:
doflocs: ndarray
t: ndarray
_boundaries: Optional[Dict[str, ndarray]] = None
_subdomains: Optional[Dict[str, ndarray]] = None
elem: Type[Element] = Element
affine: bool = False
validate: bool = False # unused; for backwards compatibility
# Some parts of the library, most notably the normal vector construction in
# ElementGlobal._eval_dofs, assume that the element indices are ascending
# because this leads to consistent normal vectors for both elements sharing
# a facet. Therefore, the element indices are sorted in a triangle mesh.
# However, some algorithms (e.g., adaptive refinement) require switching
# off this behaviour and, hence, this flag exists.
sort_t: bool = False
@property
def p(self):
return self.doflocs
@property
def dofs(self):
from skfem.assembly import Dofs
if not hasattr(self, '_dofs'):
self._dofs = Dofs(self, self.elem())
return self._dofs
@property
def refdom(self):
return self.elem.refdom
@property
def brefdom(self):
return self.elem.refdom.brefdom
@property
def bndelem(self):
return BOUNDARY_ELEMENT_MAP[self.elem]()
@property
def nelements(self):
return self.t.shape[1]
@property
def nvertices(self):
return np.max(self.t) + 1
@property
def nfacets(self):
return self.facets.shape[1]
@property
def nedges(self):
return self.edges.shape[1]
@property
def nnodes(self):
return self.t.shape[0]
@property
def subdomains(self):
return self._subdomains
@property
def boundaries(self):
return self._boundaries
@property
def facets(self):
if not hasattr(self, '_facets'):
self._init_facets()
return self._facets
@property
def t2f(self):
if not hasattr(self, '_t2f'):
self._init_facets()
return self._t2f
@property
def f2t(self):
if not hasattr(self, '_f2t'):
self._f2t = self.build_inverse(self.t, self.t2f)
return self._f2t
@property
def edges(self):
if not hasattr(self, '_edges'):
self._init_edges()
return self._edges
@property
def t2e(self):
if not hasattr(self, '_t2e'):
self._init_edges()
return self._t2e
@cached_property
def bbox(self):
"""Bounding box"""
return np.array([np.min(self.p, axis=1), np.max(self.p, axis=1)]).T
def dim(self):
return self.elem.refdom.dim()
def boundary_facets(self) -> ndarray:
"""Return an array of boundary facet indices."""
return np.nonzero(self.f2t[1] == -1)[0]
def boundary_edges(self) -> ndarray:
"""Return an array of boundary edge indices."""
facets = self.boundary_facets()
boundary_edges = np.sort(np.hstack(
tuple([np.vstack((self.facets[itr, facets],
self.facets[(itr + 1) % self.facets.shape[0],
facets]))
for itr in range(self.facets.shape[0])])).T, axis=1)
edge_candidates = np.unique(self.t2e[:, self.f2t[0, facets]])
A = self.edges[:, edge_candidates].T
B = boundary_edges
dims = A.max(0) + 1
ix = np.where(np.in1d(np.ravel_multi_index(A.T, dims),
np.ravel_multi_index(B.T, dims)))[0]
return edge_candidates[ix]
def with_boundaries(self,
boundaries: Dict[str, Callable[[ndarray], ndarray]]):
"""Return a copy of the mesh with named boundaries.
Parameters
----------
boundaries
A dictionary of lambda functions with the names of the boundaries
as keys. The corresponding lambda function should return ``True`` at
the midpoints of the facets that belong to the boundary.
"""
return replace(
self,
_boundaries={
**({} if self._boundaries is None else self._boundaries),
**{name: self.facets_satisfying(test, True)
for name, test in boundaries.items()}
},
)
def with_subdomains(self,
subdomains: Dict[str, Callable[[ndarray], ndarray]]):
"""Return a copy of the mesh with named subdomains.
Parameters
----------
subdomains
A dictionary of lambda functions with the names of the subdomains
as keys. The corresponding lambda function should return ``True`` at
the midpoints of the elements that belong to the subdomain.
"""
return replace(
self,
_subdomains={
**({} if self._subdomains is None else self._subdomains),
**{name: self.elements_satisfying(test)
for name, test in subdomains.items()},
}
)
def boundary_nodes(self) -> ndarray:
"""Return an array of boundary node indices."""
return np.unique(self.facets[:, self.boundary_facets()])
def interior_nodes(self) -> ndarray:
"""Return an array of interior node indices."""
return np.setdiff1d(np.arange(0, self.p.shape[1]),
self.boundary_nodes())
def nodes_satisfying(self,
test: Callable[[ndarray], ndarray],
boundaries_only: bool = False) -> ndarray:
"""Return nodes that satisfy some condition.
Parameters
----------
test
A function which returns ``True`` for the set of nodes that are to
be included in the return set.
boundaries_only
If ``True``, include only boundary nodes.
"""
nodes = np.nonzero(test(self.p))[0]
if boundaries_only:
nodes = np.intersect1d(nodes, self.boundary_nodes())
return nodes
def facets_satisfying(self,
test: Callable[[ndarray], ndarray],
boundaries_only: bool = False) -> ndarray:
"""Return facets whose midpoints satisfy some condition.
Parameters
----------
test
A function which returns ``True`` for the facet midpoints that are
to be included in the return set.
boundaries_only
If ``True``, include only boundary facets.
"""
midp = [np.sum(self.p[itr, self.facets], axis=0) / self.facets.shape[0]
for itr in range(self.dim())]
facets = np.nonzero(test(np.array(midp)))[0]
if boundaries_only:
facets = np.intersect1d(facets, self.boundary_facets())
return facets
def elements_satisfying(self,
test: Callable[[ndarray], ndarray]) -> ndarray:
"""Return elements whose midpoints satisfy some condition.
Parameters
----------
test
A function which returns ``True`` for the element midpoints that
are to be included in the return set.
"""
midp = [np.sum(self.p[itr, self.t], axis=0) / self.t.shape[0]
for itr in range(self.dim())]
return np.nonzero(test(np.array(midp)))[0]
def _expand_facets(self, ix: ndarray) -> Tuple[ndarray, ndarray]:
"""Return vertices and edges corresponding to given facet indices.
Parameters
----------
ix
An array of facet indices.
"""
vertices = np.unique(self.facets[:, ix].flatten())
if self.dim() == 3:
edge_candidates = self.t2e[:, self.f2t[0, ix]].flatten()
# subset of edges that share all points with the given facets
subset = np.nonzero(
np.prod(np.isin(self.edges[:, edge_candidates],
self.facets[:, ix].flatten()),
axis=0)
)[0]
edges = np.intersect1d(self.boundary_edges(),
edge_candidates[subset])
else:
edges = np.array([], dtype=np.int64)
return vertices, edges
class FakeMesh:
def __init__(self, p, t, facets, t2f, f2t, refdom):
self.p = p
self.t = t
self.facets = facets
self.t2f = t2f
self.f2t = f2t
self.refdom = refdom
def dim(self):
return self.refdom.dim()
def _mapping(self):
"""Return a default reference mapping for the mesh."""
from skfem.mapping import MappingAffine, MappingIsoparametric
if not hasattr(self, '_cached_mapping'):
fakemesh = Mesh.FakeMesh(self.doflocs, self.dofs.element_dofs,
self.facets, self.t2f, self.f2t,
self.elem.refdom)
if self.affine:
self._cached_mapping = MappingAffine(fakemesh)
else:
self._cached_mapping = MappingIsoparametric(
fakemesh,
self.elem(),
self.bndelem,
)
return self._cached_mapping
def _init_facets(self):
"""Initialize ``self.facets``."""
self._facets, self._t2f = self.build_entities(
self.t,
self.elem.refdom.facets,
)
def _init_edges(self):
"""Initialize ``self.edges``."""
self._edges, self._t2e = self.build_entities(
self.t,
self.elem.refdom.edges,
)
def __post_init__(self):
"""Support node orders used in external formats.
We expect ``self.doflocs`` to be ordered based on the
degrees-of-freedom in :class:`skfem.assembly.Dofs`. External formats
for high order meshes commonly use a less strict ordering scheme and
the extra nodes are described as additional rows in ``self.t``. This
method attempts to accommodate external formats by reordering
``self.doflocs`` and changing the indices in ``self.t``.
"""
if self.sort_t:
self.t = np.sort(self.t, axis=0)
if not isinstance(self.doflocs, ndarray):
# for backwards compatibility: support standard lists
self.doflocs = np.array(self.doflocs, dtype=np.float64)
if not isinstance(self.t, ndarray):
# for backwards compatibility: support standard lists
self.t = np.array(self.t, dtype=np.int64)
M = self.elem.refdom.nnodes
if self.nnodes > M:
# reorder DOFs to the expected format: vertex DOFs are first
p, t = self.doflocs, self.t
t_nodes = t[:M]
uniq, ix = np.unique(t_nodes, return_inverse=True)
self.t = (np.arange(len(uniq), dtype=np.int64)[ix]
.reshape(t_nodes.shape))
doflocs = np.hstack((
p[:, uniq],
np.zeros((p.shape[0], np.max(t) + 1 - len(uniq))),
))
doflocs[:, self.dofs.element_dofs[M:].flatten('F')] =\
p[:, t[M:].flatten('F')]
self.doflocs = doflocs
# C_CONTIGUOUS is more performant in dimension-based slices
if self.doflocs.flags['F_CONTIGUOUS']:
if self.doflocs.shape[1] > 1000:
warn("Transforming over 1000 vertices to C_CONTIGUOUS.")
self.doflocs = np.ascontiguousarray(self.doflocs)
if self.t.flags['F_CONTIGUOUS']:
if self.t.shape[1] > 1000:
warn("Transforming over 1000 elements to C_CONTIGUOUS.")
self.t = np.ascontiguousarray(self.t)
def __add__(self, other):
"""Join two meshes."""
if not isinstance(other, type(self)):
raise TypeError("Can only join meshes with same type.")
p = np.hstack((self.p, other.p))
t = np.hstack((self.t, other.t + self.p.shape[1]))
tmp = np.ascontiguousarray(p.T)
tmp, ixa, ixb = np.unique(tmp.view([('', tmp.dtype)] * tmp.shape[1]),
return_index=True, return_inverse=True)
p = p[:, ixa]
t = ixb[t]
cls = type(self)
return cls(p, t)
def __repr__(self):
return "{} mesh with {} vertices and {} elements.".format(
self.elem.refdom.name,
self.nvertices,
self.nelements,
)
def __str__(self):
return self.__repr__()
def save(self,
filename: str,
point_data: Optional[Dict[str, ndarray]] = None,
**kwargs) -> None:
"""Export the mesh and fields using meshio.
Parameters
----------
filename
The output filename, with suffix determining format;
e.g. .msh, .vtk, .xdmf
point_data
Data related to the vertices of the mesh.
"""
from skfem.io.meshio import to_file
return to_file(self, filename, point_data, **kwargs)
@classmethod
def load(cls, filename):
from skfem.io.meshio import from_file
return from_file(filename)
@classmethod
def from_dict(cls, data):
"""For backwards compatibility."""
if 'p' not in data or 't' not in data:
raise ValueError("Dictionary must contain keys 'p' and 't'.")
else:
data['p'] = np.ascontiguousarray(np.array(data['p']).T)
data['t'] = np.ascontiguousarray(np.array(data['t']).T)
if 'boundaries' in data and data['boundaries'] is not None:
data['boundaries'] = {k: np.array(v)
for k, v in data['boundaries'].items()}
if 'subdomains' in data and data['subdomains'] is not None:
data['subdomains'] = {k: np.array(v)
for k, v in data['subdomains'].items()}
data['doflocs'] = data.pop('p')
data['_subdomains'] = data.pop('subdomains')
data['_boundaries'] = data.pop('boundaries')
return cls(**data)
def to_dict(self) -> Dict[str, Optional[Dict[str, List[float]]]]:
"""For backwards compatibility."""
boundaries: Optional[Dict[str, List[float]]] = None
subdomains: Optional[Dict[str, List[float]]] = None
if self.boundaries is not None:
boundaries = {k: v.tolist() for k, v in self.boundaries.items()}
if self.subdomains is not None:
subdomains = {k: v.tolist() for k, v in self.subdomains.items()}
return {
'p': self.p.T.tolist(),
't': self.t.T.tolist(),
'boundaries': boundaries,
'subdomains': subdomains,
}
@classmethod
def from_mesh(cls, mesh):
"""Reuse an existing mesh by adding nodes.
Parameters
----------
mesh
The mesh used in the initialization. Connectivity of the new mesh
will match ``mesh.t``.
"""
from skfem.assembly import Dofs
mapping = mesh._mapping()
nelem = cls.elem
dofs = Dofs(mesh, nelem())
locs = mapping.F(nelem.doflocs.T)
doflocs = np.zeros((locs.shape[0], dofs.N))
# match mapped dofs and global dof numbering
for itr in range(locs.shape[0]):
for jtr in range(dofs.element_dofs.shape[0]):
doflocs[itr, dofs.element_dofs[jtr]] = locs[itr, :, jtr]
return cls(
doflocs=doflocs,
t=mesh.t,
)
@classmethod
def init_refdom(cls):
"""Initialize a mesh corresponding to the reference domain."""
return cls(cls.elem.refdom.p, cls.elem.refdom.t)
def refined(self, times_or_ix: Union[int, ndarray] = 1):
"""Return a refined mesh.
Parameters
----------
times_or_ix
Either an integer giving the number of uniform refinements or an
array of element indices for adaptive refinement.
"""
m = self
if isinstance(times_or_ix, int):
for _ in range(times_or_ix):
m = m._uniform()
else:
m = m._adaptive(times_or_ix)
return m
def scaled(self, factors):
"""Return a new mesh with scaled dimensions.
Parameters
----------
factors
Scale each dimension by a factor.
"""
if isinstance(factors, float):
# for backwards compatibility
factors = self.doflocs.shape[0] * [factors]
return replace(
self,
doflocs=np.array([self.doflocs[itr] * factors[itr]
for itr in range(len(factors))]),
)
def translated(self, diffs):
"""Return a new translated mesh.
Parameters
----------
diffs
Translate the mesh by a vector. Must have same size as the mesh
dimension.
"""
return replace(
self,
doflocs=np.array([self.doflocs[itr] + diffs[itr]
for itr in range(len(diffs))]),
)
def mirrored(self,
normal: Tuple[float, ...],
point: Optional[Tuple[float, ...]] = None):
"""Return a mesh mirrored with respect to a normal.
Meant to be combined with the other methods to build more general
meshes, e.g.,
>>> from skfem import MeshTet
>>> m1 = MeshTet()
>>> m2 = m1.mirrored((1, 0, 0))
>>> m3 = m1.mirrored((0, 1, 0))
>>> m4 = m1.mirrored((0, 0, 1))
>>> m = m1 + m2 + m3 + m4
>>> (m.nvertices, m.nelements)
(20, 20)
Parameters
----------
normal
The normal vector of the mirror plane.
point
An optional point through which the plane passes. By default, the
point corresponds to the origin.
"""
if point is None:
point = (0,) * self.dim()
p = self.p.copy()
p0 = np.array(point)
n = np.array(normal)
n = n / np.linalg.norm(n)
p += - 2. * np.dot(n, p - p0[:, None]) * n[:, None] + p0[:, None]
return replace(
self,
doflocs=p,
)
def _uniform(self):
"""Perform a single uniform refinement."""
raise NotImplementedError
def _adaptive(self, ix: ndarray):
"""Adaptively refine the given set of elements."""
raise NotImplementedError
def _splitref(self, nrefs: int = 1):
"""Split mesh into separate nonconnected elements and refine.
Used for visualization purposes.
Parameters
----------
nrefs
The number of refinements.
"""
cls = type(self)
m = cls.init_refdom().refined(nrefs)
X = m.p
x = self._mapping().F(m.p)
# create connectivity for the new mesh
nt = self.nelements
t = np.tile(m.t, (1, nt))
dt = np.max(t)
t += ((dt + 1)
* (np.tile(np.arange(nt), (m.t.shape[0] * m.t.shape[1], 1))
.flatten('F')
.reshape((-1, m.t.shape[0])).T))
if X.shape[0] == 1:
p = np.array([x.flatten()])
else:
p = x[0].flatten()
for itr in range(len(x) - 1):
p = np.vstack((p, x[itr + 1].flatten()))
return cls(p, t)
@staticmethod
def build_entities(t, indices, sort=True):
"""Build low dimensional topological entities."""
indexing = np.hstack(tuple([t[ix] for ix in indices]))
sorted_indexing = np.sort(indexing, axis=0)
sorted_indexing, ixa, ixb = np.unique(sorted_indexing,
axis=1,
return_index=True,
return_inverse=True)
mapping = ixb.reshape((len(indices), t.shape[1]))
if sort:
return np.ascontiguousarray(sorted_indexing), mapping
return np.ascontiguousarray(indexing[:, ixa]), mapping
@staticmethod
def build_inverse(t, mapping):
"""Build inverse mapping from low dimensional topological entities."""
e = mapping.flatten(order='C')
tix = np.tile(np.arange(t.shape[1]), (1, t.shape[0]))[0]
e_first, ix_first = np.unique(e, return_index=True)
e_last, ix_last = np.unique(e[::-1], return_index=True)
ix_last = e.shape[0] - ix_last - 1
inverse = np.zeros((2, np.max(mapping) + 1), dtype=np.int64)
inverse[0, e_first] = tix[ix_first]
inverse[1, e_last] = tix[ix_last]
inverse[1, np.nonzero(inverse[0] == inverse[1])[0]] = -1
return inverse
@staticmethod
def strip_extra_coordinates(p: ndarray) -> ndarray:
"""Fallback for 3D meshes."""
return p
def param(self) -> float:
"""Return mesh parameter, viz the length of the longest edge."""
raise NotImplementedError
def _reix(self, ix: ndarray) -> Tuple[ndarray, ndarray]:
"""Connect ``self.p`` based on the indices ``ix``."""
ixuniq = np.unique(ix)
t = np.zeros(np.max(ix) + 1, dtype=np.int64)
t[ixuniq] = np.arange(len(ixuniq), dtype=np.int64)
return self.p[:, ixuniq], t[ix]
def remove_elements(self, element_indices: ndarray):
"""Construct a new mesh by removing elements.
Parameters
----------
element_indices
List of element indices to remove.
"""
p, t = self._reix(np.delete(self.t, element_indices, axis=1))
return replace(
self,
doflocs=p,
t=t,
)
def element_finder(self, mapping=None):
"""Return a function handle from location to element index.
Parameters
----------
mapping
The affine mapping for the mesh.
"""
raise NotImplementedError
@dataclass(repr=False)
class Mesh2D(Mesh):
def param(self) -> float:
return np.max(
np.linalg.norm(np.diff(self.p[:, self.facets], axis=1), axis=0)
)
@staticmethod
def strip_extra_coordinates(p: ndarray) -> ndarray:
"""For meshio which appends :math:`z = 0` to 2D meshes."""
return p[:, :2]
def _repr_svg_(self) -> str:
from skfem.visuals.svg import draw
return draw(self, nrefs=2, boundaries_only=True)
@dataclass(repr=False)
class Mesh3D(Mesh):
def param(self) -> float:
return np.max(
np.linalg.norm(np.diff(self.p[:, self.edges], axis=1), axis=0)
)
def edges_satisfying(self, test: Callable[[ndarray], bool]) -> ndarray:
"""Return edges whose midpoints satisfy some condition.
Parameters
----------
test
Evaluates to 1 or ``True`` for edge midpoints of the edges
belonging to the output set.
"""
return np.nonzero(test(self.p[:, self.edges].mean(1)))[0]
def boundary_edges(self) -> ndarray:
"""Return an array of boundary edge indices."""
facets = self.boundary_facets()
boundary_edges = np.sort(np.hstack(
tuple([np.vstack((self.facets[itr, facets],
self.facets[(itr + 1) % self.facets.shape[0],
facets]))
for itr in range(self.facets.shape[0])])).T, axis=1)
edge_candidates = np.unique(self.t2e[:, self.f2t[0, facets]])
A = self.edges[:, edge_candidates].T
B = boundary_edges
dims = A.max(0) + 1
ix = np.where(np.in1d(np.ravel_multi_index(A.T, dims),
np.ravel_multi_index(B.T, dims)))[0]
return edge_candidates[ix]
def interior_edges(self) -> ndarray:
"""Return an array of interior edge indices."""
return np.setdiff1d(np.arange(self.edges.shape[1], dtype=np.int64),
self.boundary_edges())
@dataclass(repr=False)
class MeshTri1(Mesh2D):
doflocs: ndarray = np.array([[0., 0.],
[1., 0.],
[0., 1.],
[1., 1.]], dtype=np.float64).T
t: ndarray = np.array([[0, 1, 2],
[1, 3, 2]], dtype=np.int64).T
elem: Type[Element] = ElementTriP1
affine: bool = True
sort_t: bool = True
@classmethod
def init_tensor(cls: Type, x: ndarray, y: ndarray):
r"""Initialize a tensor product mesh.
The mesh topology is as follows::
*---------------*
|'-.|'-.|`'---._|
|---+---+-------|
|\ |\ |'. |
| \ | \ | '-. |
| \| \| '.|
*---------------*
Parameters
----------
x
The nodal coordinates in dimension `x`.
y
The nodal coordinates in dimension `y`.
"""
npx = len(x)
npy = len(y)
X, Y = np.meshgrid(np.sort(x), np.sort(y))
p = np.vstack((X.flatten('F'), Y.flatten('F')))
ix = np.arange(npx * npy)
nt = (npx - 1) * (npy - 1)
t = np.zeros((3, 2 * nt))
ix = ix.reshape(npy, npx, order='F').copy()
t[0, :nt] = (ix[0:(npy-1), 0:(npx-1)].reshape(nt, 1, order='F')
.copy()
.flatten())
t[1, :nt] = (ix[1:npy, 0:(npx-1)].reshape(nt, 1, order='F')
.copy()
.flatten())
t[2, :nt] = (ix[1:npy, 1:npx].reshape(nt, 1, order='F')
.copy()
.flatten())
t[0, nt:] = (ix[0:(npy-1), 0:(npx-1)].reshape(nt, 1, order='F')
.copy()
.flatten())
t[1, nt:] = (ix[0:(npy-1), 1:npx].reshape(nt, 1, order='F')
.copy()
.flatten())
t[2, nt:] = (ix[1:npy, 1:npx].reshape(nt, 1, order='F')
.copy()
.flatten())
return cls(p, t.astype(np.int64))
@classmethod
def init_symmetric(cls: Type) -> Mesh2D:
r"""Initialize a symmetric mesh of the unit square.
The mesh topology is as follows::
*------------*
|\ /|
| \ / |
| \ / |
| * |
| / \ |
| / \ |
|/ \|
O------------*
"""
p = np.array([[0., 1., 1., 0., .5],
[0., 0., 1., 1., .5]], dtype=np.float64)
t = np.array([[0, 1, 4],
[1, 2, 4],
[2, 3, 4],
[0, 3, 4]], dtype=np.int64).T
return cls(p, t)
@classmethod
def init_sqsymmetric(cls: Type) -> Mesh2D:
r"""Initialize a symmetric mesh of the unit square.
The mesh topology is as follows::
*------*------*
|\ | /|
| \ | / |
| \ | / |
*------*------*
| / | \ |
| / | \ |
|/ | \|
O------*------*
"""
p = np.array([[0., .5, 1., 0., .5, 1., 0., .5, 1.],
[0., 0., 0., .5, .5, .5, 1., 1., 1.]], dtype=np.float64)
t = np.array([[0, 1, 4],
[1, 2, 4],
[2, 4, 5],
[0, 3, 4],
[3, 4, 6],
[4, 6, 7],
[4, 7, 8],
[4, 5, 8]], dtype=np.int64).T
return cls(p, t)
@classmethod
def init_lshaped(cls: Type) -> Mesh2D:
r"""Initialize a mesh for the L-shaped domain.
The mesh topology is as follows::
*-------*
| \ |
| \ |
| \ |
|-------O-------*
| / | \ |
| / | \ |
| / | \ |
*---------------*
"""
p = np.array([[0., 1., 0., -1., 0., -1., -1., 1.],
[0., 0., 1., 0., -1., -1., 1., -1.]], dtype=np.float64)
t = np.array([[0, 1, 7],
[0, 2, 6],
[0, 6, 3],
[0, 7, 4],
[0, 4, 5],
[0, 3, 5]], dtype=np.int64).T
return cls(p, t)
@classmethod
def init_circle(cls: Type,
nrefs: int = 3) -> Mesh2D:
r"""Initialize a circle mesh.
Works by repeatedly refining the following mesh and moving
new nodes to the boundary::
*
/ | \
/ | \
/ | \
*------O------*
\ | /
\ | /
\ | /
*
Parameters
----------
nrefs
Number of refinements, by default 3.
"""
p = np.array([[0., 0.],
[1., 0.],
[0., 1.],
[-1., 0.],
[0., -1.]], dtype=np.float64).T
t = np.array([[0, 1, 2],
[0, 1, 4],
[0, 2, 3],
[0, 3, 4]], dtype=np.int64).T
m = cls(p, t)
for _ in range(nrefs):
m = m.refined()
D = m.boundary_nodes()
tmp = m.p
tmp[:, D] = tmp[:, D] / np.linalg.norm(tmp[:, D], axis=0)
m = replace(m, doflocs=tmp)
return m
def _uniform(self):
p = self.doflocs
t = self.t
sz = p.shape[1]
t2f = self.t2f.copy() + sz
return replace(
self,
doflocs=np.hstack((p, p[:, self.facets].mean(axis=1))),
t=np.hstack((
np.vstack((t[0], t2f[0], t2f[2])),
np.vstack((t[1], t2f[0], t2f[1])),
np.vstack((t[2], t2f[2], t2f[1])),
np.vstack((t2f[0], t2f[1], t2f[2])),
)),
_boundaries=None,
_subdomains=None,
)
@staticmethod
def _adaptive_sort_mesh(p, t):
"""Make (0, 2) the longest edge in t."""
l01 = np.sqrt(np.sum((p[:, t[0]] - p[:, t[1]]) ** 2, axis=0))
l12 = np.sqrt(np.sum((p[:, t[1]] - p[:, t[2]]) ** 2, axis=0))
l02 = np.sqrt(np.sum((p[:, t[0]] - p[:, t[2]]) ** 2, axis=0))
ix01 = (l01 > l02) * (l01 > l12)
ix12 = (l12 > l01) * (l12 > l02)
# row swaps
tmp = t[2, ix01]
t[2, ix01] = t[1, ix01]
t[1, ix01] = tmp
tmp = t[0, ix12]
t[0, ix12] = t[1, ix12]
t[1, ix12] = tmp
return t
@staticmethod
def _adaptive_find_facets(m, marked_elems):
"""Find the facets to split."""
facets = np.zeros(m.facets.shape[1], dtype=np.int64)
facets[m.t2f[:, marked_elems].flatten('F')] = 1
prev_nnz = -1e10
while np.count_nonzero(facets) - prev_nnz > 0:
prev_nnz = np.count_nonzero(facets)
t2facets = facets[m.t2f]
t2facets[2, t2facets[0] + t2facets[1] > 0] = 1
facets[m.t2f[t2facets == 1]] = 1
return facets
@staticmethod
def _adaptive_split_elements(m, facets):
"""Define new elements."""
ix = (-1) * np.ones(m.facets.shape[1], dtype=np.int64)
ix[facets == 1] = (np.arange(np.count_nonzero(facets))
+ m.p.shape[1])
ix = ix[m.t2f]
red = (ix[0] >= 0) * (ix[1] >= 0) * (ix[2] >= 0)
blue1 = (ix[0] == -1) * (ix[1] >= 0) * (ix[2] >= 0)
blue2 = (ix[0] >= 0) * (ix[1] == -1) * (ix[2] >= 0)
green = (ix[0] == -1) * (ix[1] == -1) * (ix[2] >= 0)
rest = (ix[0] == -1) * (ix[1] == -1) * (ix[2] == -1)
# new red elements
t_red = np.hstack((
np.vstack((m.t[0, red], ix[0, red], ix[2, red])),
np.vstack((m.t[1, red], ix[0, red], ix[1, red])),
np.vstack((m.t[2, red], ix[1, red], ix[2, red])),
np.vstack((ix[1, red], ix[2, red], ix[0, red])),
))
# new blue elements
t_blue1 = np.hstack((
np.vstack((m.t[1, blue1], m.t[0, blue1], ix[2, blue1])),
np.vstack((m.t[1, blue1], ix[1, blue1], ix[2, blue1])),
np.vstack((m.t[2, blue1], ix[2, blue1], ix[1, blue1])),
))
t_blue2 = np.hstack((
np.vstack((m.t[0, blue2], ix[0, blue2], ix[2, blue2])),
np.vstack((ix[2, blue2], ix[0, blue2], m.t[1, blue2])),
np.vstack((m.t[2, blue2], ix[2, blue2], m.t[1, blue2])),
))
# new green elements
t_green = np.hstack((
np.vstack((m.t[1, green], ix[2, green], m.t[0, green])),
np.vstack((m.t[2, green], ix[2, green], m.t[1, green])),
))
# new nodes
p = .5 * (m.p[:, m.facets[0, facets == 1]] +
m.p[:, m.facets[1, facets == 1]])
return (
np.hstack((m.p, p)),
np.hstack((m.t[:, rest], t_red, t_blue1, t_blue2, t_green)),
)
def _adaptive(self, marked):
sorted_mesh = replace(
self,
t=self._adaptive_sort_mesh(self.p, self.t),
sort_t=False,
)
facets = self._adaptive_find_facets(sorted_mesh, marked)
doflocs, t = self._adaptive_split_elements(sorted_mesh, facets)
return replace(
self,
doflocs=doflocs,
t=t,
_boundaries=None,
_subdomains=None,
)
def element_finder(self, mapping=None):
if mapping is None:
mapping = self._mapping()
tree = cKDTree(np.mean(self.p[:, self.t], axis=1).T)
def finder(x, y):
ix = tree.query(np.array([x, y]).T, 5)[1].flatten()
X = mapping.invF(np.array([x, y])[:, None], ix)
inside = (
(X[0] >= 0) *
(X[1] >= 0) *
(1 - X[0] - X[1] >= 0)
)
return np.array([ix[np.argmax(inside, axis=0)]]).flatten()
return finder
@dataclass(repr=False)
class MeshQuad1(Mesh2D):
doflocs: ndarray = np.array([[0., 0.],
[1., 0.],
[1., 1.],
[0., 1.]], dtype=np.float64).T
t: ndarray = np.array([[0, 1, 2, 3]], dtype=np.int64).T
elem: Type[Element] = ElementQuad1
def _uniform(self):
p = self.doflocs
t = self.t
sz = p.shape[1]
t2f = self.t2f.copy() + sz
mid = np.arange(t.shape[1], dtype=np.int64) + np.max(t2f) + 1
return replace(
self,
doflocs=np.hstack((
p,
p[:, self.facets].mean(axis=1),
p[:, self.t].mean(axis=1),
)),
t=np.hstack((
np.vstack((t[0], t2f[0], mid, t2f[3])),
np.vstack((t2f[0], t[1], t2f[1], mid)),
np.vstack((mid, t2f[1], t[2], t2f[2])),
np.vstack((t2f[3], mid, t2f[2], t[3])),
)),
_boundaries=None,
_subdomains=None,
)
@classmethod
def init_tensor(cls: Type,
x: ndarray,
y: ndarray):
"""Initialize a tensor product mesh.
The mesh topology is as follows::
*-------------*
| | | |
|---+--+------|
| | | |
| | | |
| | | |
*-------------*
Parameters
----------
x
The nodal coordinates in dimension `x`.
y
The nodal coordinates in dimension `y`.
"""
npx = len(x)
npy = len(y)
X, Y = np.meshgrid(np.sort(x), np.sort(y))
p = np.vstack((X.flatten('F'), Y.flatten('F')))
ix = np.arange(npx * npy)
nt = (npx - 1) * (npy - 1)
t = np.zeros((4, nt))
ix = ix.reshape(npy, npx, order='F').copy()
t[0] = (ix[0:(npy-1), 0:(npx-1)].reshape(nt, 1, order='F')
.copy()
.flatten())
t[1] = (ix[1:npy, 0:(npx-1)].reshape(nt, 1, order='F')
.copy()
.flatten())
t[2] = (ix[1:npy, 1:npx].reshape(nt, 1, order='F')
.copy()
.flatten())
t[3] = (ix[0:(npy-1), 1:npx].reshape(nt, 1, order='F')
.copy()
.flatten())
return cls(p, t.astype(np.int64))
def to_meshtri(self, x: Optional[ndarray] = None):
"""Split each quadrilateral into two triangles."""
t = np.hstack((self.t[[0, 1, 3]], self.t[[1, 2, 3]]))
subdomains = None
if self.subdomains:
subdomains = {k: np.concatenate((v, v + self.t.shape[1]))
for k, v in self.subdomains.items()}
mesh = MeshTri1(self.doflocs, t)
boundaries = None
if self.boundaries:
boundaries = {}
for k in self.boundaries:
slots = enumerate(mesh.facets.T)
boundaries[k] = np.array([
next(dropwhile(lambda slot: not(np.array_equal(f,
slot[1])),
slots))[0]
for f in self.facets.T[np.sort(self.boundaries[k])]])
if self._subdomains or self._boundaries:
mesh = replace(
mesh,
_boundaries=boundaries,
_subdomains=subdomains,
)
if x is not None:
if len(x) == self.t.shape[1]:
# preserve elemental constant functions
X = np.concatenate((x, x))
else:
raise Exception("The parameter x must have one value per "
"element.")
return mesh, X
return mesh
def element_finder(self, mapping=None):
"""Transform to :class:`skfem.MeshTri` and return its finder."""
tri_finder = self.to_meshtri().element_finder()
def finder(*args):
return tri_finder(*args) % self.t.shape[1]
return finder
@dataclass(repr=False)
class MeshTri2(MeshTri1):
elem: Type[Element] = ElementTriP2
affine: bool = False
sort_t: bool = False
@classmethod
def init_circle(cls: Type,
nrefs: int = 3) -> Mesh2D:
m = MeshTri1.init_circle(nrefs=nrefs)
M = cls.from_mesh(m)
D = M.dofs.get_facet_dofs(M.boundary_facets()).flatten()
doflocs = M.doflocs.copy()
doflocs[:, D] /= np.linalg.norm(doflocs[:, D], axis=0)
return replace(M, doflocs=doflocs)
@dataclass(repr=False)
class MeshQuad2(MeshQuad1):
elem: Type[Element] = ElementQuad2
@dataclass(repr=False)
class MeshLine1(Mesh):
doflocs: ndarray = np.array([[0., 1.]], dtype=np.float64)
t: ndarray = np.array([[0], [1]], dtype=np.int64)
elem: Type[Element] = ElementLineP1
affine: bool = True
def __post_init__(self):
if len(self.doflocs.shape) == 1:
# support flat arrays
self.doflocs = np.array([self.doflocs])
if self.t.shape[1] != self.doflocs.shape[1] - 1:
# fill self.t assuming ascending self.doflocs if not provided
tmp = np.arange(self.doflocs.shape[1] - 1, dtype=np.int64)
self.t = np.vstack((tmp, tmp + 1))
super().__post_init__()
def __mul__(self, other):
return MeshQuad1.init_tensor(self.p[0], other.p[0])
def _uniform(self):
p, t = self.doflocs, self.t
newp = np.hstack((p, p[:, t].mean(axis=1)))
newt = np.empty((t.shape[0], 2 * t.shape[1]),
dtype=t.dtype)
newt[0, ::2] = t[0]
newt[0, 1::2] = p.shape[1] + np.arange(t.shape[1])
newt[1, ::2] = newt[0, 1::2]
newt[1, 1::2] = t[1]
return replace(
self,
doflocs=newp,
t=newt,
_boundaries=None,
_subdomains=None,
)
def _adaptive(self, marked):
p, t = self.doflocs, self.t
mid = range(len(marked)) + np.max(t) + 1
nonmarked = np.setdiff1d(np.arange(t.shape[1]), marked)
newp = np.hstack((p, p[:, t[:, marked]].mean(1)))
newt = np.vstack((t[0, marked], mid))
newt = np.hstack((t[:, nonmarked],
newt,
np.vstack((mid, t[1, marked]))))
return replace(
self,
doflocs=newp,
t=newt,
)
def param(self):
return np.max(np.abs(self.p[0, self.t[1]] - self.p[0, self.t[0]]))
def element_finder(self, mapping=None):
ix = np.argsort(self.p)
def finder(x):
maxix = (x == np.max(self.p))
x[maxix] = x[maxix] - 1e-10 # special case in np.digitize
return np.argmax(np.digitize(x, self.p[0, ix[0]])[:, None]
== self.t[0], axis=1)
return finder
@staticmethod
def strip_extra_coordinates(p: ndarray) -> ndarray:
return p[:, :1]
@dataclass(repr=False)
class MeshTet1(Mesh3D):
doflocs: ndarray = np.array([[0., 0., 0.],
[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.],
[0., 1., 1.],
[1., 0., 1.],
[1., 1., 0.],
[1., 1., 1.]], dtype=np.float64).T
t: ndarray = np.array([[0, 1, 2, 3],
[3, 5, 1, 7],
[2, 3, 6, 7],
[2, 3, 1, 7],
[1, 2, 4, 7]], dtype=np.int64).T
elem: Type[Element] = ElementTetP1
affine: bool = True
def element_finder(self, mapping=None):
if mapping is None:
mapping = self._mapping()
tree = cKDTree(np.mean(self.p[:, self.t], axis=1).T)
def finder(x, y, z):
ix = tree.query(np.array([x, y, z]).T, 5)[1].flatten()
X = mapping.invF(np.array([x, y, z])[:, None], ix)
inside = (
(X[0] >= 0) *
(X[1] >= 0) *
(X[2] >= 0) *
(1 - X[0] - X[1] - X[2] >= 0)
)
return np.array([ix[np.argmax(inside, axis=0)]]).flatten()
return finder
def _uniform(self):
t = self.t
p = self.p
sz = p.shape[1]
t2e = self.t2e.copy() + sz
# new vertices are the midpoints of edges
newp = np.hstack((p, p[:, self.edges].mean(axis=1)))
# compute middle pyramid diagonal lengths and choose shortest
d1 = ((newp[0, t2e[2]] - newp[0, t2e[4]]) ** 2 +
(newp[1, t2e[2]] - newp[1, t2e[4]]) ** 2)
d2 = ((newp[0, t2e[1]] - newp[0, t2e[3]]) ** 2 +
(newp[1, t2e[1]] - newp[1, t2e[3]]) ** 2)
d3 = ((newp[0, t2e[0]] - newp[0, t2e[5]]) ** 2 +
(newp[1, t2e[0]] - newp[1, t2e[5]]) ** 2)
I1 = d1 < d2
I2 = d1 < d3
I3 = d2 < d3
c1 = I1 * I2
c2 = (~I1) * I3
c3 = (~I2) * (~I3)
# splitting the pyramid in the middle;
# diagonals are [2,4], [1,3] and [0,5]
newt = np.hstack((
np.vstack((t[0], t2e[0], t2e[2], t2e[3])),
np.vstack((t[1], t2e[0], t2e[1], t2e[4])),
np.vstack((t[2], t2e[1], t2e[2], t2e[5])),
np.vstack((t[3], t2e[3], t2e[4], t2e[5])),
|
np.vstack((t2e[2, c1], t2e[4, c1], t2e[0, c1], t2e[1, c1]))
|
numpy.vstack
|
import numpy as np
class World(object):
def __init__(self, width=30, height=20):
self.width = width
self.height = height
self.states = [np.full((height, width), 0)]
self.current_day = 0
def next_day(self):
if self.current_day == len(self.states) - 1:
current_day = self.states[-1]
u =
|
np.pad(current_day, ((1, 0), (0, 0)))
|
numpy.pad
|
import os
import warnings
import numpy as np
import scipy
import scipy.linalg as sl
from sklearn.decomposition import TruncatedSVD
from . import constants
from . import cifti_utils
def create_dir(path):
"""If the dir in path does not exist, create it.
:param path: The dir path.
"""
if not os.path.exists(path):
os.makedirs(path)
def remove_elements_from_list(input_list, elements):
""" Remove all elements from list that are also on another list.
:param input_list: The list to remove from.
:param elements: The elements to remove.
:return: The list without elements.
"""
input_set = set(input_list)
if len(input_list) != len(input_set):
warnings.warn("In remove_elements_from_list in utils: " +
"the input list contains duplicates which will be removed, "
"if this is not the desired behavior please implement this "
"yourself.")
return list(input_set - set(elements))
def add_ones_column_to_matrix(mat):
""" Add a column of 1's
Usually needed for linear algebra.
:param mat: The original matrix
:return: The matrix with another 1's column, as its first column.
"""
shape = list(mat.shape)
shape[1] += 1
res = np.ones(shape)
res[:, 1:] = mat
return res
def fsl_glm(x, y):
"""Translation of the MATLAB fsl_glm method into python.
Args:
x: data
y: labels
Returns:
the t-coefficient of the data.
"""
beta = sl.lstsq(x, y)[0]
r = y - np.dot(x, beta)
dof = np.size(y, 0) - np.linalg.matrix_rank(x)
sigma_sq = np.sum(r ** 2, axis=0) / dof
grot = np.diag(scipy.linalg.inv((x.transpose().dot(x))))
varcope = grot.reshape([len(grot), 1]).dot(sigma_sq.reshape([1, len(sigma_sq)]))
t = beta / np.sqrt(varcope)
t[np.isnan(t)] = 0
return t
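# Illustrative usage sketch (added for clarity, not part of the original module).
# The shapes are assumptions: with two regressors plus an intercept column and
# four targets, fsl_glm returns one t-statistic per (regressor, target) pair.
def _demo_fsl_glm():
    rng = np.random.RandomState(0)
    x = add_ones_column_to_matrix(rng.rand(100, 2))   # (100, 3) design matrix
    y = rng.rand(100, 4)                              # (100, 4) data
    t = fsl_glm(x, y)
    assert t.shape == (3, 4)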
def fsl_demean(x, dim=None):
"""Implementation of the MATLAB fsl_demean method into python
:param x: The data to demean.
:param dim: The dim index to demean by.
:return: The demeaned data.
"""
if dim is None:
dim = 0
if x.shape[0] > 1:
dim = 0
elif x.shape[1] > 1:
dim = 1
dims = x.shape
dim_size = dims[dim]
dim_rep = np.ones([len(dims)])
dim_rep[dim] = dim_size
mean =
|
np.mean(x, dim, keepdims=True)
|
numpy.mean
|
# A simple Psi 4 input script to compute a SCF reference using Psi4's libJK
# Requires numpy 1.7.2+
#
# Created by: <NAME>
# Date: 4/1/15
# License: GPL v3.0
#
import time
import numpy as np
import helper_HF as scf_helper
import scipy.linalg as SLA
np.set_printoptions(precision=5, linewidth=200, suppress=True)
import psi4
# Memory for Psi4 in GB
psi4.set_memory('2 GB')
psi4.core.set_output_file('output.dat', False)
# Memory for numpy in GB
numpy_memory = 2
# Triplet O2
mol = psi4.geometry("""
0 5
O
O 1 1.2
symmetry c1
""")
psi4.set_options({'guess': 'core',
'basis': 'aug-cc-pvtz',
'scf_type': 'df',
'e_convergence': 1e-8,
'reference': 'rohf'})
wfn = psi4.core.Wavefunction.build(mol, psi4.core.get_global_option('BASIS'))
# Set occupations
nocc = wfn.nalpha()
ndocc = wfn.nbeta()
nsocc = nocc - ndocc
# Set defaults
maxiter = 15
max_micro = 5
micro_print = True
micro_conv = 5.e-3
E_conv = 1.0E-8
D_conv = 1.0E-8
# Integral generation from Psi4's MintsHelper
t = time.time()
mints = psi4.core.MintsHelper(wfn.basisset())
S = np.asarray(mints.ao_overlap())
nbf = S.shape[0]
#I = np.array(mints.ao_eri())
print('\nNumber of doubly occupied orbitals: %d' % ndocc)
print('Number of singly occupied orbitals: %d' % nsocc)
print('Number of basis functions: %d' % nbf)
V = np.asarray(mints.ao_potential())
T = np.asarray(mints.ao_kinetic())
print('\nTotal time taken for integrals: %.3f seconds.' % (time.time()-t))
t = time.time()
# Build H_core
H = T + V
# Orthogonalizer A = S^(-1/2)
A = mints.ao_overlap()
A.power(-0.5, 1.e-16)
A = np.asarray(A)
def SCF_Hx(x, moFa, moFb, C):
"""
Compute a Hessian-vector product guess, where x is an ov matrix of nonredundant operators.
"""
Co_a = C[:, :nocc]
Co_b = C[:, :ndocc]
C_right_a = np.dot(C[:, nocc:], x[:, nsocc:].T)
C_right_b = np.dot(C[:, ndocc:], x[:ndocc, :].T)
J, K = scf_helper.compute_jk(jk, [Co_a, Co_b], [C_right_a, C_right_b])
J1, J2 = J
K1, K2 = K
IAJB = (C[:, :nocc].T).dot(J1 - 0.5 * K1 - 0.5 * K1.T).dot(C[:, ndocc:])
IAJB += 0.5 * np.dot(x[:, nsocc:], moFa[nocc:, ndocc:])
IAJB -= 0.5 * np.dot(moFa[:nocc, :nocc], x)
IAJB[:, :nsocc] = 0.0
iajb = (C[:, :nocc].T).dot(J2 - 0.5 * K2 - 0.5 * K2.T).dot(C[:, ndocc:])
iajb += 0.5 * np.dot(x, moFb[ndocc:, ndocc:])
iajb -= 0.5 * np.dot(moFb[:nocc, :ndocc], x[:ndocc, :])
iajb[ndocc:, :] = 0.0
IAjb = (C[:, :nocc].T).dot(J2).dot(C[:, ndocc:])
IAjb[ndocc:] += 0.5 *
|
np.dot(x[:, :nsocc].T, moFb[:nocc, ndocc:])
|
numpy.dot
|
import numpy as np
import scipy.stats as st
import seaborn as sns
import matplotlib.pyplot as plt
mus = np.array([5, 5])
sigmas = np.array([[1, .9], [.9, 1]])
def circle(x, y):
return (x-1)**2 + (y-2)**2 - 3**2
def pgauss(x, y):
return st.multivariate_normal.pdf([x, y], mean=mus, cov=sigmas)
def metropolis_hastings(p, iter=1000):
x, y = 0., 0.
samples =
|
np.zeros((iter, 2))
|
numpy.zeros
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import glob
import struct
import random
import argparse
import numpy as np
from scipy import optimize
from itertools import repeat, chain
from functools import lru_cache, reduce
from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor
from .pretraining_dataset import CachedDataLoader, packed_data_file_format
@lru_cache(maxsize=None)
def get_packing_strategies(start_length, minimum_increment, target_length, depth):
"""Recursively build a list of unique packing "strategies".
These strategies represent the ways that up to "depth" many sequences can
be packed together to produce a packed sequence of exactly "target_length"
tokens in total. For example [1, 2, 509] represent the packing strategy of
combining a sequence of length 1, a sequence of length 2, and sequence of
length 509 to create one packed sequence of length 512.
To ensure strategies are unique, each sequence added to the pack must be
at least as long as the previous sequence added to the pack. This is
tracked through the "minimum_increment" variable and results in the
strategies containing sequence lengths in sorted order e.g. [1, 2, 509]
but not [2, 1, 509]
Parameters
----------
start_length : int
The current cumulative number of tokens in the pack.
Typically initialized to 0.
minimum_increment : int
The minimum length of the next sequence that can be added to the pack.
Typically initialized to 1.
target_length : int
The target_length for a pack of sequences (e.g. 512).
depth : int
Remaining depth in the recursion (must be > 0).
Returns
-------
strategies : list[list[int]]
A list of strategies where each strategy is a list of integers
representing sequence lengths of the components in the pack. Each
strategy should have at most "depth" entries and sum up to "target_length".
"""
gap = target_length - start_length
strategies = []
# Complete the packing with exactly 1 number
if depth == 1:
if gap >= minimum_increment:
strategies.append([gap])
# Complete the sample in "depth" steps, recursively
else:
for new in range(minimum_increment, gap + 1):
new_gap = target_length - start_length - new
if new_gap == 0:
strategies.append([new])
else:
options = get_packing_strategies(start_length + new, new, target_length, depth - 1)
for option in options:
if len(option) > 0:
strategies.append([new] + option)
return strategies
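# Illustrative usage sketch (added for clarity, not part of the original module):
# enumerate all ways of filling a pack of 4 tokens with at most 3 sequences.
# The small target length of 4 is an assumption chosen to keep the output readable.
def _demo_get_packing_strategies():
    strategies = get_packing_strategies(0, 1, 4, 3)
    # Each strategy is already sorted, so sorting the outer list is enough.
    assert sorted(strategies) == [[1, 1, 2], [1, 3], [2, 2], [4]]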
def get_packing_matrix(strategy_set, max_sequence_length):
"""Construct a packing matrix from a set of packing strategies.
The packing matrix "A" is of shape [max_sequence_length, len(strategy_set)].
Each column of the matrix corresponds to a strategy and each row corresponds
to the usage of a particular sequence length across all strategies.
This matrix is typically very sparse. For instance, for packing depth 3,
each strategy uses at most 3 sequences, leading to 3 non-zero entries in
that strategy's column in A. The density of the matrix is then only
3/max_sequence_length. This sparsity can be exploited to further speed up the
packing algorithm.
Parameters
----------
strategy_set : list[list[int]]
A list of unique strategies as returned by get_packing_strategies.
max_sequence_length : int
The target or maximum sequence length of the packing problem.
Returns
-------
A : np.array of shape [max_sequence_length, len(strategy_set)]
The packing matrix for the provided strategy set.
"""
num_strategies = len(strategy_set)
A = np.zeros((max_sequence_length, num_strategies), dtype=np.int32)
for i, strategy in enumerate(strategy_set):
for seq_len in strategy:
A[seq_len - 1, i] += 1
return A
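# Illustrative usage sketch (added for clarity, not part of the original module):
# for max_sequence_length=4, the strategy [1, 1, 2] uses sequence length 1 twice
# and sequence length 2 once, which shows up in its column of A.
def _demo_get_packing_matrix():
    strategy_set = [[4], [1, 3], [2, 2], [1, 1, 2]]
    A = get_packing_matrix(strategy_set, 4)
    assert (A[:, 3] == np.array([2, 1, 0, 0])).all()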
def get_packing_recipe(args, sequence_lengths):
"""Given program arguments and a list of sequence lengths return the packing recipe.
A "packing recipe" primarily consists of a set of strategies "strategy_set" and the "mixture"
which states how many times each one of these strategies should be applied in order to pack
the dataset. Additionally, we also return the "padding" vector which states how many sequences
of a given sequence length need to be added to our dataset in order to use the proposed mixture
of strategies.
Parameters
----------
args : namedtuple containing the following attributes
max_sequence_length : int
The maximum sequence length to which the sequences will be packed. Used to generate the
appropriate packing strategies.
max_sequences_per_pack : int
The maximum number of sequences that can ever be put into a pack. Used to generate the
appropriate packing strategies.
drop_unpacked_remainder : bool
Whether to drop the sequences that could not be packed (usually a very small percentage)
If false, then the unpacked sequences will be padded instead.
sequence_lengths : list[int]
A list containing the sequence length of each example in the un-packed dataset.
Returns
-------
strategy_set : list[list[int]]
The list of unique packing strategies with which the packing problem
was solved.
mixture : list[int] of shape [len(strategy_set)]
States how many times each of the strategies from the strategy set
should be repeated to cover the entire dataset.
padding : list[int] of shape [max_sequence_length]
For each sequence length, how many padding sequences of that length
need to be created to realize the packing mixture.
"""
print("Entering packing solver".center(80, "_"))
# List all unique ways of packing to the desired maximum sequence length
strategy_set = get_packing_strategies(0, 1, args.max_sequence_length, args.max_sequences_per_pack)
for strategy in strategy_set:
assert(sum(strategy) == args.max_sequence_length)
num_strategies = len(strategy_set)
print(f"Packing will involve {num_strategies} unique packing strategies.",
f"at a maximum {args.max_sequences_per_pack} sequences per pack.")
# Get the packing matrix corresponding to this list of packing strategies
A = get_packing_matrix(strategy_set, args.max_sequence_length)
# To achieve more robust convergence of the packing problem we create
# weights that penalize the residual on short sequences less.
# In other words we allow short sequences (up to length padding_cutoff)
# to be over-used to a larger degree than longer sequences
padding_cutoff = 8
w0 = np.ones([args.max_sequence_length])
w0[:padding_cutoff] = padding_cutoff / (2 * args.max_sequence_length)
w0 = np.sqrt(w0)
# Histogram of sequence lengths
histogram, bins = np.histogram(sequence_lengths, bins=np.arange(1, args.max_sequence_length + 2))
# Solve the packing problem
# A@mixture = histogram
# i.e. find the non-negative "mixture" of strategies such that the
# packing matches the distribution of sequences lengths (histogram) as
# closely as possible in the least squares sense
print(f"Sequences to pack: ", histogram.sum())
start = time.time()
mixture, rnorm = optimize.nnls(np.expand_dims(w0, -1) * A, w0 * histogram)
print(f"Solving non-negative least squares took {time.time() - start:3.2f} seconds.")
# Round the floating point solution to integers.
# The relative error introduced by this is relatively small since we are
# dealing with millions of sequences while rounding introduces a residual
# of around ~ max_sequence_length sequences.
residual_float = histogram - A @ mixture
mixture = np.rint(mixture)
# Compute the residuals
residual = histogram - A @ mixture
rounding_residual = abs(residual_float - residual).sum()
print(f"Total residual of packing mixture: {abs(residual).sum():3.1f}",
f"Total residual introduced by rounding mixture to int: {rounding_residual:3.2f}",
f"Residual on first 8 categories: {
|
np.around(residual[:8], 4)
|
numpy.around
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import torch
# # Neural Networks
# * The use of non-linear activation functions as the key difference from linear models
# * The different types of activation functions
# * PyTorch's `nn` module, which contains the building blocks for constructing NNs
# * Solving a simple linear _fit_ problem with a NN
# ## Artificial neurons
#
# * Neural networks: mathematical entities capable of representing complicated functions through a composition of simpler functions.
# * Originally inspired by the way our brain works.
# * The basic building block is a neuron:
# * Essentially a linear transformation of the input (e.g. multiplying the input by a number, the _weight_, and adding a constant, the _bias_),
# * followed by the application of a non-linear function (referred to as the activation function)
# * $o = f(w x + b)$
# * x is our input, w the _weight_ and b the _bias_. $f$ is the activation function.
# * x can be a scalar or a vector of values, w can be a scalar or a matrix, while b is a scalar or a vector.
# * The expression $o = f(w x + b)$ is a layer of neurons, since it represents many neurons through multidimensional _weights_ and _biases_ (a by-hand sketch follows the stacked-layer equations below)
# $x_1 = f(w_0 x_0 + b_0)$
#
# $x_2 = f(w_1 x_1 + b_1)$
#
# $...$
#
# $y = f(w_n x_n + b_n)$
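# A minimal sketch (not in the original notebook): one "layer" computed by hand,
# o = f(w x + b) with tanh as the activation f.
w = torch.tensor([[2.0]])
b = torch.tensor([0.5])
x_in = torch.tensor([[1.0]])
o = torch.tanh(x_in @ w.t() + b)  # tensor([[0.9866]])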
# ### **drawings**
# ## Activation functions
# * Our previous model already had a linear operation. That was the entire model.
# * The role of the activation function is to concentrate the _outputs_ of the preceding linear operation into a given range.
# * If we want to assign a _score_ to the model's output, we need to limit the range of possible numbers for that _score_
# * `float32`
# * $\sum wx + b$
# ### What options do we have?
# * One option would be to put a cap on the _output_ values (see the sketch after this list).
# * Anything below zero would become zero
# * anything above 10 would become 10
# * `torch.nn.Hardtanh`
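# A minimal sketch (not in the original notebook): `torch.nn.Hardtanh` clamps
# values into the [0, 10] range described above.
import torch.nn as nn
hardtanh = nn.Hardtanh(min_val=0.0, max_val=10.0)
hardtanh(torch.tensor([-3.0, 5.0, 42.0]))  # tensor([ 0.,  5., 10.])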
# In[3]:
import math
math.tanh(-2.2) # truck
# In[4]:
math.tanh(0.1) # bear
# In[5]:
math.tanh(2.5) # dog
# 
# * There are many activation functions.
# * By definition, activation functions:
# * Are non-linear. Repeated applications of $wx+b$ without an activation function collapse into a single affine (linear) map. The non-linearity is what lets the network approximate more complex functions.
# * Are differentiable, so that gradients can be computed through them. Point discontinuities, as in `Hardtanh` or `ReLU`, are fine.
# * Without these properties, networks either reduce to overly simple functions or become difficult to train.
# * Additionally, activation functions:
# * Have at least one sensitive range, where non-trivial changes in the input produce a corresponding non-trivial change in the output
# * Have at least one non-sensitive (or saturated) range, where changes to the input produce little or no change in the output.
# * Finally, activation functions have at least one of the following:
# * A lower bound that is approached (or reached) as the input goes to negative infinity.
# * A similar but inverted upper bound for positive infinity.
# * Given what we know about how back-propagation works:
# * Errors will propagate backwards through the activation most effectively when the inputs lie within the responsive range (see the sketch after this list).
# * On the other hand, errors will barely affect neurons whose _input_ is saturated, because the gradient there is close to zero.
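# A minimal sketch (not in the original notebook): the gradient of tanh is large
# in the sensitive range and nearly zero where the input is saturated.
x_act = torch.tensor([0.1, 5.0], requires_grad=True)
torch.tanh(x_act).sum().backward()
x_act.grad  # approximately tensor([0.9900, 0.0002])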
# ### In conclusion
#
# * In a network made of linear units + activations, when it receives different _inputs_:
# * different units will respond in different ranges for the same inputs
# * the errors associated with those inputs will mainly affect the neurons operating in the sensitive range, leaving the other units more or less unchanged during learning.
# * Combining many linear operations + activation units in parallel and stacking them on top of each other gives us a mathematical object capable of approximating complicated functions.
# * Different combinations of units will respond to inputs in different ranges
# * Those parameters are relatively easy to optimize with SGD
# ### Drawing: separate computational graphs
# In[7]:
import torch.nn as nn
linear_model = nn.Linear(1, 1)
linear_model(val_t_un)
# All subclasses of `nn.Module` have a `__call__` method defined. This makes it possible to create an instance of `nn.Linear` and call it as if it were a function.
#
# Calling an instance of `nn.Module` with a set of arguments ends up calling a method named `forward` with those same arguments
# ### Implementation of `Module.__call__`
#
# (simplified for clarity)
# In[8]:
def __call__(self, *input, **kwargs):
for hook in self._forward_pre_hooks.values():
hook(self, input)
result = self.forward(*input, **kwargs)
for hook in self._forward_hooks.values():
hook_result = hook(self, input, result)
# ...
for hook in self._backward_hooks.values():
# ...
return result
# ### Back to the linear model
# In[9]:
import torch.nn as nn
linear_model = nn.Linear(1, 1)
linear_model(val_t_un)
# `nn.Linear` accepts three arguments:
# * the number of input features: input size = 1
# * the number of output features: output size = 1
# * whether to include a bias or not (`True` by default); a minimal sketch follows below
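# A minimal sketch (not in the original notebook): turning the bias off leaves
# only the weight as a learnable parameter.
no_bias_model = nn.Linear(1, 1, bias=False)
list(no_bias_model.parameters())  # only the weight tensor, no bias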
# In[10]:
linear_model.weight
# In[11]:
linear_model.bias
# In[12]:
x = torch.ones(1)
linear_model(x)
# * Our model takes one input and produces one output
# * `nn.Module` and its subclasses are designed to do that over multiple samples at the same time
# * To accommodate multiple samples, modules expect dimension 0 of the input to be the number of samples in the _batch_
# * Any module in `nn` is written to produce outputs for a _batch_ of multiple inputs at the same time.
# * B x Nin
# * B is the _batch_ size
# * Nin is the number of input features
# In[13]:
x = torch.ones(10, 1)
linear_model(x)
# For a dataset of images:
# * B x C x H x W (see the sketch below)
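# A minimal sketch (not in the original notebook): a hypothetical batch of 16 RGB
# images of 64x64 pixels laid out as B x C x H x W.
img_batch = torch.randn(16, 3, 64, 64)
img_batch.shape  # torch.Size([16, 3, 64, 64])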
# In[14]:
t_c.size()
# In[6]:
t_c = [0.5, 14.0, 15.0, 28.0, 11.0, 8.0, 3.0, -4.0, 6.0, 13.0, 21.0] # Temperature in degrees Celsius
t_u = [35.7, 55.9, 58.2, 81.9, 56.3, 48.9, 33.9, 21.8, 48.4, 60.4, 68.4] # Unknown units
t_c = torch.tensor(t_c).unsqueeze(1) # Add a dimension to get B x N_inputs
t_u = torch.tensor(t_u).unsqueeze(1) # Add a dimension to get B x N_inputs
n_samples = t_u.shape[0]
n_val = int(0.2 * n_samples)
shuffled_indices = torch.randperm(n_samples)
train_indices = shuffled_indices[:-n_val]
val_indices = shuffled_indices[-n_val:]
train_t_u = t_u[train_indices]
train_t_c = t_c[train_indices]
val_t_u = t_u[val_indices]
val_t_c = t_c[val_indices]
train_t_un = 0.1 * train_t_u
val_t_un = 0.1 * val_t_u
# In[15]:
import torch.nn as nn
import torch.optim as optim
params_old = torch.tensor([1.0, 0.0], requires_grad=True)
learning_rate_old = 1e-1
optimizer_old = optim.Adam([params_old], lr=learning_rate_old)
linear_model = nn.Linear(1, 1)
optimizer = optim.SGD(
linear_model.parameters(), # we replace [params] with this method
lr=1e-2)
# ### linear_model.parameters()
# In[16]:
list(linear_model.parameters())
# In[17]:
def training_loop(model, n_epochs, optimizer, loss_fn, train_x, val_x, train_y, val_y):
for epoch in range(1, n_epochs + 1):
train_t_p = model(train_x) # we no longer have to pass the params explicitly
train_loss = loss_fn(train_t_p, train_y)
with torch.no_grad(): # all the args here have requires_grad=False
val_t_p = model(val_x)
val_loss = loss_fn(val_t_p, val_y)
optimizer.zero_grad()
train_loss.backward()
optimizer.step()
if epoch == 1 or epoch % 1000 == 0:
print(f"Epoch {epoch}, Training loss {train_loss}, Validation loss {val_loss}")
# In[18]:
linear_model = nn.Linear(1, 1)
optimizer = optim.SGD(linear_model.parameters(), lr=1e-2)
training_loop(
n_epochs=3000,
optimizer=optimizer,
model=linear_model,
loss_fn=nn.MSELoss(), # We are no longer using our hand-written loss function
train_x = train_t_un,
val_x = val_t_un,
train_y = train_t_c,
val_y = val_t_c)
print()
print(linear_model.weight)
print(linear_model.bias)
# ## Finally, a Neural Network
# * Last step: replace our linear model
# * It is not going to be better
# * The only thing we are going to change is the model
# * A simple NN:
# * A linear layer
# * Activation
# * "hidden layers"
# In[19]:
seq_model = nn.Sequential(
nn.Linear(1, 13), # The 13 is arbitrary
nn.Tanh(),
nn.Linear(13, 1) # This 13 must match the first one
)
seq_model
# * The end result is a model that takes the inputs expected by the first module (_layer_)
# * Passes the intermediate outputs along to the rest of the modules
# * Produces an output returned by the last module (a minimal sketch follows below)
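# A minimal sketch (not in the original notebook): a batch of 4 samples flows
# through the first linear layer, the Tanh, and the last linear layer.
seq_model(torch.ones(4, 1)).shape  # torch.Size([4, 1])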
# In[20]:
[param.size() for param in seq_model.parameters()]
# * These are the parameters that the optimizer will receive
# * When calling `backward()` all the parameters are populated with their `grad`
# * The optimizer then uses those `grad` values to update the parameters during `optimizer.step()`
# In[21]:
for name, param in seq_model.named_parameters():
print(name, param.size())
# In[22]:
from collections import OrderedDict
named_seq_model = nn.Sequential(OrderedDict([
('hidden_linear', nn.Linear(1, 8)),
('hidden_activation', nn.Tanh()),
('output_linear', nn.Linear(8, 1))
]))
seq_model
# In[23]:
for name, param in named_seq_model.named_parameters():
print(name, param.size())
# In[24]:
named_seq_model.output_linear.bias
# Useful for inspecting parameters or their gradients.
# In[ ]:
optimizer = optim.SGD(seq_model.parameters(), lr=1e-3)
training_loop(
n_epochs=5000,
optimizer=optimizer,
model=seq_model,
loss_fn=nn.MSELoss(), # We are no longer using our hand-written loss function
train_x = train_t_un,
val_x = val_t_un,
train_y = train_t_c,
val_y = val_t_c)
print('output', seq_model(val_t_un))
print('answer', val_t_c)
print('hidden', seq_model.hidden_linear.weight.grad)
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
z = x + y
end.record()
# Waits for everything to finish running
torch.cuda.synchronize()
print(start.elapsed_time(end))
# We can also evaluate the model on the whole dataset and see how different it is from a straight line:
# In[26]:
from matplotlib import pyplot as plt
t_range = torch.arange(20., 90.).unsqueeze(1)
fig = plt.figure(dpi=600)
plt.xlabel("Fahrenheit")
plt.ylabel("Celsius")
plt.plot(t_u.numpy(), t_c.numpy(), 'o')
plt.plot(t_range.numpy(), seq_model(0.1 * t_range).detach().numpy(), 'c-')
plt.plot(t_u.numpy(), seq_model(0.1 * t_u).detach().numpy(), 'kx')
plt.show()
# ## Subclassing nn.Module
#
# * Subclassing `nn.Module` gives us much more flexibility.
# * The interface specifies that, at a minimum, we must define a `forward` method for the subclass
# * `forward` takes the input to the model and returns the output
# * If we use `torch` operations, `autograd` takes care of the `backward` pass automatically
#
# * Normally we define the submodules used in the `forward` method inside the constructor
# * This allows them to be called in `forward` and to keep their parameters for the whole lifetime of our module
# In[27]:
class SubclassModel(nn.Module):
def __init__(self):
super().__init__()
self.hidden_linear = nn.Linear(1, 13)
self.hidden_activation = nn.Tanh()
self.output_linear = nn.Linear(13, 1)
def forward(self, input):
hidden_t = self.hidden_linear(input)
activated_t = self.hidden_activation(hidden_t)
#activated_t = self.hidden_activation(hidden_t) if random.random() > 0.5 else hidden_t
output_t = self.output_linear(activated_t)
return output_t
subclass_model = SubclassModel()
subclass_model
# * It lets us manipulate the outputs directly and reshape them into a B x N tensor
# * We leave the batch dimension as -1 since we don't know how many inputs will arrive per batch
# * Assigning an instance of `nn.Module` to an attribute of an `nn.Module` registers that module as a submodule.
# * This gives `Net` access to the `parameters` of its submodules without having to collect them manually (a minimal check follows below)
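# A minimal sketch (not in the original notebook): the attributes assigned in the
# constructor show up as registered submodules.
[name for name, _ in subclass_model.named_modules()]
# ['', 'hidden_linear', 'hidden_activation', 'output_linear']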
# In[28]:
numel_list = [p.numel() for p in subclass_model.parameters()]
sum(numel_list), numel_list
# **What happened**
#
# * `parameters()` inspects all the submodules assigned as attributes of the constructor and calls `parameters` on them recursively.
# * By accessing their `grad` attribute, which is populated by `autograd`, the optimizer knows how to change the parameters to minimize the _loss_
# In[29]:
for type_str, model in [('seq', seq_model), ('named_seq', named_seq_model), ('subclass', subclass_model)]:
print(type_str)
for name_str, param in model.named_parameters():
print("{:21} {:19} {}".format(name_str, str(param.shape), param.numel()))
print()
# In[30]:
class SubclassFunctionalModel(nn.Module):
def __init__(self):
super().__init__()
self.hidden_linear = nn.Linear(1, 14)
self.output_linear = nn.Linear(14, 1)
def forward(self, input):
hidden_t = self.hidden_linear(input)
activated_t = torch.tanh(hidden_t)
output_t = self.output_linear(activated_t)
return output_t
func_model = SubclassFunctionalModel()
func_model
# ## Exercises
# * Experiment with the number of neurons in the model as well as the learning rate.
# * Which changes result in a more linear output from the model?
# * Can you make the model obviously overfit the data?
#
# * Load the [white wine data](https://archive.ics.uci.edu/ml/datasets/wine+quality) and build a model with the appropriate number of inputs
# * How long does it take to train compared to the dataset we have been using?
# * Can you explain which factors contribute to the training times?
# * Can you make the _loss_ decrease?
# * Try plotting the data
# In[84]:
import time
start = time.time()
seq_model = nn.Sequential(
nn.Linear(1, 3000),
nn.Tanh(),
nn.Linear(3000, 1) #
)
optimizer = optim.SGD(seq_model.parameters(), lr=1e-4)
training_loop(
n_epochs=9000,
optimizer=optimizer,
model=seq_model,
loss_fn=nn.MSELoss(), # The PyTorch loss function is used, not the hand-written one
train_x = train_t_un,
val_x = val_t_un,
train_y = train_t_c,
val_y = val_t_c)
end = time.time()
print(end - start)
# # Experimenting with the number of neurons in the model as well as the learning rate.
# ### Which changes result in a more linear output from the model?
#
# With more epochs and more neurons the loss decreases, so the gain in accuracy can be attributed to increasing the number of neurons. A very small learning rate does not necessarily bring significant improvements; the best loss was observed at 1e-4, since any larger value stopped contributing to the model.
#
# ### Can you make the model obviously overfit the data?
#
# An obvious overfit can be produced by using far too many neurons and fixing the learning rate at the highest value allowed.
#
# In[62]:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# In[63]:
whine_df = pd.read_csv("winequality-white.csv", sep=";")
whine_df.head()
# In[65]:
corr = whine_df.corr()
corr
# In[74]:
X = whine_df['alcohol']
y = whine_df['quality']
# In[75]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
# In[76]:
X_train =
|
np.array(X_train)
|
numpy.array
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue May 21 08:56:18 2019
@author: rdamseh
"""
import os
from VascGraph.Tools.CalcTools import prunG, reduceG,\
getMiddleGraph, rescaleG, \
findNodes, getBranches, fixG, getCoreGraph
from VascGraph.Tools.VisTools import visG
import numpy as np
try:
from mayavi import mlab
except: pass
try:
from matplotlib import pyplot as plt
except: pass
class ValidateNetMets:
def __init__(self, Gr, Ge,
rescale=False,
middle=False,
prune=False,
outputfolder='results',
sigma=[10,20,30,40,50,60]):
self.Gr=Gr.copy()
self.Ge=Ge.copy()
if prune:
self.Gr=prunG(Gr.copy())
self.Ge=prunG(Ge.copy())
#middle graphs
if middle:
self.Gr=getMiddleGraph(self.Gr, middle)
self.Ge=getMiddleGraph(self.Ge, middle)
#rescale graphs
if rescale:
self.Gr=rescaleG(self.Gr)
self.Ge=rescaleG(self.Ge)
#find graphs vertices
self.points_real=np.array(self.Gr.GetNodesPos())
self.points_exp=np.array(self.Ge.GetNodesPos())
#find bifurcations (junction nodes)
self.idNodes_real, self.nodes_real = findNodes(self.Gr)
self.nodes_real=np.array(self.nodes_real)
#
self.idNodes_exp, self.nodes_exp = findNodes(self.Ge)
self.nodes_exp=np.array(self.nodes_exp)
# num of all nodes
self.n_nodes_r=np.shape(self.nodes_real)[0]
self.n_nodes_e=np.shape(self.nodes_exp)[0]
#reduced graphs
self.G_real_reduced=reduceG(self.Gr.copy())
self.G_exp_reduced=reduceG(self.Ge.copy())
# get branches
self.branches1=getBranches(self.Gr)
self.branches2=getBranches(self.Ge)
self.outputfolder=outputfolder
self.sigma=sigma
def vis(self, save=False, name=None, cam=None):
from VascGraph.Tools.VisTools import setCam, createCam
from VascGraph.GraphLab import GraphPlot
def plot(g, color):
gplot=GraphPlot()
gplot.Update(g)
gplot.SetGylphSize(.01)
gplot.SetTubeRadius(2)
gplot.SetTubeColor(color)
gplot.SetTubeRadiusByScale(True)
bgcolor=(1,1,1)
if cam is None:
position = [1194.8393680906522, 1491.5272445674307, -874.4021568391549]
focal_point = [257.15006008258143, 256.92547521800316, 330.6489784843938]
view_angle = 30.0
view_up = [-0.4853531757850406, -0.39346331460859185, -0.7807809646838195]
clipping_range = [940.3721291401878, 3256.3268137240707]
cam=createCam(position=position,
focal_point=focal_point,
view_angle=view_angle,
view_up=view_up,
clipping_range=clipping_range)
# visualize matching
mlab.figure(bgcolor=bgcolor)
plot(self.Gr, color=(.3,.3,.8))
plot(self.Gcore_real, color=(.3,.3,.8))
plot(self.Gcompared_real, color=(.9,.9,.1))
setCam(cam)
if save:
mlab.savefig(name+'_FN.png', size=(1024,1024))
#
mlab.figure(bgcolor=bgcolor)
plot(self.Ge, color=(.3,.3,.8))
plot(self.Gcore_exp, color=(.3,.3,.8))
plot(self.Gcompared_exp, color=(.9,.9,.1))
setCam(cam)
if save:
mlab.savefig(name+'_FP.png', size=(1024,1024))
def matchG(self):
# REAL TO EXP
self.dist1=[]
for idx, i in enumerate(self.nodes_real):
self.dist1.append(np.sum((i-self.nodes_exp)**2, axis=1))
#real nodes with the corresponding exp. ones
self.idx1=np.argmin(self.dist1, axis=1)
self.d1=[i[self.idx1[j]]**.5 for j, i in enumerate(self.dist1)]
self.idNodes_exp_m=np.array(self.idNodes_exp)[self.idx1]
self.nodes_exp_m=self.nodes_exp[self.idx1]
# EXP TO REAL
self.dist2=[]
for idx, i in enumerate(self.nodes_exp):
self.dist2.append(np.sum((i-self.nodes_real)**2, axis=1))
#exp nodes with the corresponding real. ones
self.idx2=np.argmin(self.dist2, axis=1)
self.d2=[i[self.idx2[j]]**.5 for j, i in enumerate(self.dist2)]
self.idNodes_real_m=np.array(self.idNodes_real)[self.idx2]
self.nodes_real_m=self.nodes_real[self.idx2]
def scoresG(self, portion=[.99],
save=False,
foldername=None):
sigma=self.sigma
self.matchG()
if foldername:
pass
else:
foldername=self.outputfolder
def decideThresh(v, portion):
vals,bins=np.histogram(v,bins=1000)
vals=vals.astype(float)/sum(vals)
s=0
thresh=0
for idx, i in enumerate(vals):
s+=i
if s>portion:
thresh=bins[idx]
break
return thresh
# match nodes and get G scores
self.GFNR=[]
self.GFPR=[]
for j in portion:
thresh1=decideThresh(self.d1,j)
thresh2=decideThresh(self.d2,j)
g_FNR_=[]
for i in sigma:
v1=np.array(self.d1)
v1=v1*(v1<thresh1)
v2=1-np.exp(-v1**2/(2*i*i))
v3=np.mean(v2); g_FNR_.append(v3)
self.GFNR.append(g_FNR_)
g_FPR_=[]
for i in sigma:
v1=np.array(self.d2)
v1=v1*(v1<thresh2)
v2=1-np.exp(-v1**2/(2*i*i))
v3=np.mean(v2); g_FPR_.append(v3)
self.GFPR.append(g_FPR_)
# ravel lists
self.GFNR=np.ravel(self.GFNR)
self.GFPR=np.ravel(self.GFPR)
if save:
path=os.getcwd()
dirr=path+'/'+foldername
if not os.path.exists(dirr):
os.mkdir(dirr)
np.savetxt(dirr+'/GFNR.txt', self.GFNR)
np.savetxt(dirr+'/GFPR.txt', self.GFPR)
np.savetxt(dirr+'/stats.txt', [self.n_nodes_r,
self.n_nodes_e,
self.n_branches_r,
self.n_branches_e])
def plotDist(self, save=False, foldername=None):
try:
import seaborn as sns
except:
print('To run this function, \'seaborn\' should be installed.')
return
sns.set_style('darkgrid')
if foldername:
pass
else:
foldername=self.outputfolder
plt.figure(figsize=(8.3,5.5))
sns.kdeplot(self.d1,
label=r'$\mathbf{J}_{r}$ $\rightarrow$ $\mathbf{J}_{exp}$',
cut=0, marker='s', markevery=0.05, linewidth=2)
sns.kdeplot(self.d2,
label=r'$\mathbf{J}_{e}$ $\rightarrow$ $\mathbf{J}_{real}$',
cut=0, marker='8', markevery=0.05, linewidth=2)
plt.legend(fontsize=22)
plt.ylabel('Probability', fontsize=20); plt.xlabel('$D$', fontsize=20)
plt.xlim(xmin=0 , xmax=80)
plt.xticks(fontsize = 16)
plt.yticks(fontsize = 16)
if save:
path=os.getcwd()
dirr=path+'/'+foldername
if not os.path.exists(dirr):
os.mkdir(dirr)
plt.savefig(dirr+'/dist.eps', format='eps', dpi=1000, transparent=True)
plt.close()
def matchC(self, sigma=10):
############################
# match nodes in both graphs based on distance threshold
############################
# REAL TO EXP
self.matchG()
self.d1C=np.array(self.d1)
self.idx1_pass=np.where(self.d1C<sigma)[0] #to find matched nodes that pass the condition
self.idNodes_real_pass=np.array(self.idNodes_real)[self.idx1_pass]
self.idx1_fail=
|
np.where(self.d1C>sigma)
|
numpy.where
|
"""
Forced DA Analysis
------------------
Top-level script to run the forced DA analysis, following the procedure described in
`CarlierForcedDA2019`_.
Arguments:
*--Required--*
- **beam** *(int)*: Beam to use.
Flags: **['-b', '--beam']**
Choices: ``[1, 2]``
- **energy** *(MultiClass)*: Beam energy in GeV.
Flags: **['-e', '--energy']**
- **kick_directory** *(MultiClass)*: Analysis kick_directory containing kick files.
Flags: **['-k', '--kickdir']**
- **plane** *(str)*: Plane of the kicks.
Flags: **['-p', '--plane']**
Choices: ``['X', 'Y']``
*--Optional--*
- **emittance_outlier_limit** *(float)*: Limit, i.e. cut from mean, on emittance outliers in meter.
Default: ``5e-07``
- **emittance_tfs** *(MultiClass)*: Dataframe or Path of pre-saved emittance tfs.
- **emittance_type** *(str)*: Which BSRT data to use (from database).
Choices: ``['fit_sigma', 'average']``
Default: ``average``
- **emittance_window_length** *(int)*: Length of the moving average window. (# data points)
Default: ``100``
- **fill** *(int)*: Fill that was used. If not given, check out time_around_kicks.
Flags: **['-f', '--fill']**
- **fit** *(str)*: Fitting function to use (rearranges parameters to make sense).
Choices: ``['exponential', 'linear']``
Default: ``exponential``
- **intensity_tfs** *(MultiClass)*: Dataframe or Path of pre-saved intensity tfs.
- **intensity_time_after_kick** *(int)*: Defines the times after the kicks (in seconds) which is used for intensity averaging to calculate the losses.
Default: ``[5, 30]``
- **intensity_time_before_kick** *(int)*: Defines the times before the kicks (in seconds) which is used for intensity averaging to calculate the losses.
Default: ``[30, 5]``
- **normalized_emittance** *(float)*: Assumed NORMALIZED nominal emittance for the machine.
Default: ``3.7499999999999997e-06``
- **output_directory** *(MultiClass)*: Output directory; if not given, a subfolder in the kick directory is used.
Flags: **['-o', '--outdir']**
- **pagestore_db** *(MultiClass)*: (Path to-) presaved timber database
- **show**: Show plots.
Action: ``store_true``
- **show_wirescan_emittance** *(BoolOrPathOrDataFrame)*: Flag if the emittance from wirescan should also be shown, can also be a Dataframe or Path of pre-saved emittance bws tfs.
Default: ``False``
- **timber_db** *(str)*: Which timber database to use.
Choices: ``['all', 'mdb', 'ldb', 'nxcals']``
Default: ``all``
- **time_around_kicks** *(int)*: If no fill is given, this defines the time (in minutes) when data before the first and after the last kick is extracted.
Default: ``10``
- **plot_styles** *(str)*: Which plotting styles to use,
either from omc3 styles or default mpl.
Default: ``['standard']``
- **manual_style** *(DictAsString)*: Additional style rcParameters which update the set of predefined ones.
Default: ``{}``
:author: jdilly
.. _CarlierForcedDA2019: https://journals.aps.org/prab/pdf/10.1103/PhysRevAccelBeams.22.031002
"""
import os
from collections import defaultdict
from contextlib import suppress
from pathlib import Path
from typing import Tuple
import matplotlib as mpl
import matplotlib.colors as mcolors
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.transforms as mtrans
import numpy as np
import pandas as pd
import scipy.odr
import scipy.optimize
import tfs
from generic_parser import EntryPointParameters, entrypoint
from generic_parser.entry_datatypes import (
DictAsString,
FALSE_ITEMS,
TRUE_ITEMS,
get_instance_faker_meta,
get_multi_class,
)
from generic_parser.tools import DotDict
from omc3.optics_measurements import toolbox
from omc3.plotting.utils import annotations, colors, lines, style
from omc3.tune_analysis.bbq_tools import clean_outliers_moving_average
from omc3.utils import logging_tools
from omc3.utils.iotools import save_config
from omc3.utils.mock import cern_network_import
from omc3.utils.time_tools import CERNDatetime
from pandas import DataFrame, Series
from pandas.plotting import register_matplotlib_converters
from tfs import TfsDataFrame
from tfs.tools import significant_digits
pytimber = cern_network_import('pytimber')
PageStore = cern_network_import('pytimber.pagestore.PageStore')
from pylhc.constants.forced_da_analysis import (
BSRT_EMITTANCE_TO_METER,
BWS_DIRECTIONS,
BWS_EMITTANCE_TO_METER,
HEADER_BSRT_OUTLIER_LIMIT,
HEADER_BSRT_ROLLING_WINDOW,
HEADER_ENERGY,
HEADER_TIME_AFTER,
HEADER_TIME_BEFORE,
INITIAL_DA_FIT,
INTENSITY,
INTENSITY_AFTER,
INTENSITY_BEFORE,
INTENSITY_KEY,
INTENSITY_LOSSES,
KICKFILE,
MAX_CURVEFIT_FEV,
OUTFILE_INTENSITY,
OUTLIER_LIMIT,
PLOT_FILETYPES,
RESULTS_DIR,
ROLLING_AVERAGE_WINDOW,
TIME_AFTER_KICK_S,
TIME_AROUND_KICKS_MIN,
TIME_BEFORE_KICK_S,
YPAD,
bsrt_emittance_key,
bws_emittance_key,
column_action,
column_bws_norm_emittance,
column_emittance,
column_norm_emittance,
err_col,
header_da,
header_da_error,
header_nominal_emittance,
header_norm_nominal_emittance,
mean_col,
outfile_emittance,
outfile_emittance_bws,
outfile_kick,
outfile_plot,
rel_col,
sigma_col,
)
from pylhc.constants.general import (
LHC_NOMINAL_EMITTANCE,
TFS_SUFFIX,
TIME_COLUMN,
get_proton_beta,
get_proton_gamma,
)
LOG = logging_tools.get_logger(__name__)
# Weird Datatypes
class BoolOrPathOrDataFrame(
metaclass=get_instance_faker_meta(bool, Path, str, tfs.TfsDataFrame, pd.DataFrame, type(None))
):
"""
A class that behaves like a `boolean` when possible, otherwise like a `Path`, `string` or
`Dataframe`.
"""
def __new__(cls, value):
if isinstance(value, str):
value = value.strip("'\"") # behavior like dict-parser
if value in TRUE_ITEMS:
return True
elif value in FALSE_ITEMS:
return False
else:
try:
return Path(value)
except TypeError:
return value
def _get_pathclass(*other_classes):
class SomethingOrPath(metaclass=get_instance_faker_meta(Path, str, *other_classes, type(None))):
"""A class that behaves like a if possible `Path`, `string` or something else."""
def __new__(cls, value):
if isinstance(value, str):
value = value.strip("'\"") # Needs to be done for strings in config-files
try:
return Path(value)
except TypeError:
return value
return SomethingOrPath
PathOrDataframe = _get_pathclass(tfs.TfsDataFrame, pd.DataFrame)
PathOrPagestore = _get_pathclass(PageStore)
PathOrString = _get_pathclass()
def get_params():
return EntryPointParameters(
kick_directory=dict(
flags=["-k", "--kickdir"],
required=True,
type=PathOrString,
help="Analysis kick_directory containing kick files.",
),
output_directory=dict(
flags=["-o", "--outdir"],
type=PathOrString,
help="Output kick_directory, if not given subfolder in kick kick_directory",
),
energy=dict(
flags=["-e", "--energy"],
required=True,
type=get_multi_class(float, int),
help="Beam energy in GeV.",
),
fill=dict(
flags=["-f", "--fill"],
type=get_multi_class(int, type(None)),
help="Fill that was used. If not given, check out time_around_kicks.",
),
beam=dict(
flags=["-b", "--beam"], required=True, choices=[1, 2], type=int, help="Beam to use."
),
plane=dict(
flags=["-p", "--plane"],
choices=["X", "Y"],
required=True,
type=str,
help=(
"Plane of the kicks."
# " Give 'XY' for using both planes (e.g. diagonal kicks)." # Future release
),
),
time_around_kicks=dict(
type=int,
default=TIME_AROUND_KICKS_MIN,
help=(
"If no fill is given, this defines the time (in minutes) "
"when data before the first and after the last kick is extracted."
),
),
intensity_time_before_kick=dict(
type=int,
nargs=2,
default=TIME_BEFORE_KICK_S,
help=(
"Defines the times before the kicks (in seconds) "
"which is used for intensity averaging to calculate the losses."
),
),
intensity_time_after_kick=dict(
type=int,
nargs=2,
default=TIME_AFTER_KICK_S,
help=(
"Defines the times after the kicks (in seconds) "
"which is used for intensity averaging to calculate the losses."
),
),
normalized_emittance=dict(
type=float,
default=LHC_NOMINAL_EMITTANCE,
help="Assumed NORMALIZED nominal emittance for the machine.",
),
emittance_tfs=dict(
type=PathOrDataframe, help="Dataframe or Path of pre-saved emittance tfs.",
),
intensity_tfs=dict(
type=PathOrDataframe, help="Dataframe or Path of pre-saved intensity tfs.",
),
show_wirescan_emittance=dict(
default=False,
type=BoolOrPathOrDataFrame,
help=(
"Flag if the emittance from wirescan should also be shown, "
"can also be a Dataframe or Path of pre-saved emittance bws tfs."
),
),
timber_db=dict(
type=str,
default="all",
choices=["all", "mdb", "ldb", "nxcals"],
help="Which timber database to use.",
),
pagestore_db=dict(type=PathOrPagestore, help="(Path to-) presaved timber database"),
fit=dict(
type=str,
default="exponential",
choices=["exponential", "linear"],
help="Fitting function to use (rearranges parameters to make sense).",
),
emittance_window_length=dict(
help="Length of the moving average window. (# data points)",
type=int,
default=ROLLING_AVERAGE_WINDOW,
),
emittance_outlier_limit=dict(
help="Limit, i.e. cut from mean, on emittance outliers in meter.",
type=float,
default=OUTLIER_LIMIT,
),
emittance_type=dict(
type=str,
default="average",
choices=["fit_sigma", "average"],
help="Which BSRT data to use (from database).",
),
show=dict(action="store_true", help="Show plots.",),
plot_styles=dict(
type=str,
nargs="+",
default=["standard"],
help="Which plotting styles to use, either from omc3 styles or default mpl.",
),
manual_style=dict(
type=DictAsString,
default={},
help="Additional style rcParameters which update the set of predefined ones.",
),
)
@entrypoint(get_params(), strict=True)
def main(opt):
LOG.debug("Starting Forced DA analysis.")
_log_opt(opt)
kick_dir, out_dir = _get_output_dir(opt.kick_directory, opt.output_directory)
with suppress(PermissionError):
save_config(out_dir, opt, __file__)
# get data
kick_df = _get_kick_df(kick_dir, opt.plane)
intensity_df, emittance_df, emittance_bws_df = _get_dataframes(
kick_df.index,
opt.get_subdict(
[
"fill",
"beam",
"plane",
"time_around_kicks",
"emittance_tfs",
"intensity_tfs",
"show_wirescan_emittance",
"timber_db",
"pagestore_db",
"emittance_window_length",
"emittance_outlier_limit",
"emittance_type",
"normalized_emittance",
]
),
)
_check_all_times_in(kick_df.index, intensity_df.index[0], intensity_df.index[-1])
# add data to kicks
kick_df = _add_intensity_and_losses_to_kicks(
kick_df, intensity_df, opt.intensity_time_before_kick, opt.intensity_time_after_kick
)
kick_df = _add_emittance_to_kicks(
opt.plane, opt.energy, kick_df, emittance_df, opt.normalized_emittance
)
kick_df = _do_fit(opt.plane, kick_df, opt.fit)
kick_df = _convert_to_sigmas(opt.plane, kick_df)
# output
_write_tfs(out_dir, opt.plane, kick_df, intensity_df, emittance_df, emittance_bws_df)
# plotting
figs = dict()
register_matplotlib_converters() # for datetime plotting
style.set_style(opt.plot_styles, opt.manual_style)
figs["emittance"] = _plot_emittances(
out_dir, opt.beam, opt.plane, emittance_df, emittance_bws_df, kick_df.index
)
figs["intensity"] = _plot_intensity(out_dir, opt.beam, opt.plane, kick_df, intensity_df)
for fit_type in ("exponential", "linear", "norm"):
figs[f"da_fit_{fit_type}"] = _plot_da_fit(out_dir, opt.beam, opt.plane, kick_df, fit_type)
if opt.show:
plt.show()
LOG.debug("Forced DA analysis finished.")
return figs
# Helper ---
def _log_opt(opt: DotDict):
"""Show options in log."""
LOG.info("Performing ForcedDA Analysis for:")
if opt.fill is not None:
LOG.info(f" Fill: {opt.fill}")
LOG.info(f" Energy: {opt.energy} GeV")
LOG.info(f" Beam: {opt.beam}")
LOG.info(f" Plane: {opt.plane}")
LOG.info(f" Analysis Directory: '{opt.kick_directory}'")
def _write_tfs(
out_dir: Path,
plane: str,
kick_df: DataFrame,
intensity_df: DataFrame,
emittance_df: DataFrame,
emittance_bws_df: DataFrame,
):
"""Write out gathered data."""
LOG.debug("Writing tfs files.")
for df in (kick_df, intensity_df, emittance_df, emittance_bws_df):
if df is not None:
df.insert(0, TIME_COLUMN, [CERNDatetime(dt).cern_utc_string() for dt in df.index])
try:
tfs.write(out_dir / outfile_kick(plane), kick_df)
tfs.write(out_dir / OUTFILE_INTENSITY, intensity_df)
tfs.write(out_dir / outfile_emittance(plane), emittance_df)
if emittance_bws_df is not None:
tfs.write(out_dir / outfile_emittance_bws(plane), emittance_bws_df)
except (FileNotFoundError, IOError):
LOG.error(f"Cannot write into directory: {str(out_dir)} ")
def _check_all_times_in(series: Series, start: CERNDatetime, end: CERNDatetime):
"""Check if all times in series are between start and end."""
if any(s for s in series if s < start or s > end):
raise ValueError(
"Some of the kick-times are outside of the fill times! "
"Check if correct kick-file or fill number are used."
)
def _convert_time_index(list_: list, path: Path = None) -> pd.Index:
"""Tries to convert time index to cerntime, first from datetime, then string, then timestamp."""
for index_convert in (
_datetime_to_cerntime_index,
_string_to_cerntime_index,
_timestamp_to_cerntime_index,
):
with suppress(TypeError):
return index_convert(list_)
msg = f"Unrecognized format in column '{TIME_COLUMN}'"
if path:
msg += f" in '{str(path)}'"
raise TypeError(msg)
def _string_to_cerntime_index(list_):
return pd.Index((CERNDatetime.from_cern_utc_string(t) for t in list_), dtype=object)
def _timestamp_to_cerntime_index(list_):
return pd.Index((CERNDatetime.from_timestamp(t) for t in list_), dtype=object)
def _datetime_to_cerntime_index(list_):
return pd.Index((CERNDatetime(t) for t in list_), dtype=object)
def _drop_duplicate_indices(df):
duplicate_mask = [True] + [
df.index[idx] != df.index[idx - 1] for idx in range(1, len(df.index))
]
return df.loc[duplicate_mask, :]
# TFS Data Loading -------------------------------------------------------------
def _get_dataframes(
kick_times: pd.Index, opt: DotDict
) -> Tuple[TfsDataFrame, TfsDataFrame, TfsDataFrame]:
"""Gets the intensity and emittance dataframes from either input, files or (timber) database."""
db = _get_db(opt)
if opt.fill is not None:
timespan_ts = _get_fill_times(db, opt.fill)
timespan_dt = _convert_time_index(timespan_ts)
else:
td = pd.Timedelta(minutes=opt.time_around_kicks)
timespan_dt = (kick_times.min() - td, kick_times.max() + td)
timespan_ts = tuple(t.timestamp() for t in timespan_dt)
if opt.intensity_tfs:
intensity_df = _read_tfs(opt.intensity_tfs, timespan_dt)
else:
intensity_df = _get_bctrf_beam_intensity_from_timber(opt.beam, db, timespan_ts)
if opt.emittance_tfs:
emittance_df = _read_tfs(opt.emittance_tfs, timespan_dt)
else:
emittance_df = _get_bsrt_bunch_emittances_from_timber(
opt.beam, opt.plane, db, timespan_ts, opt.emittance_type, opt.normalized_emittance
)
emittance_df = _filter_emittance_data(
emittance_df, opt.plane, opt.emittance_window_length, opt.emittance_outlier_limit
)
if opt.show_wirescan_emittance is True:
emittance_bws_df = _get_bws_emittances_from_timber(opt.beam, opt.plane, db, timespan_ts)
elif opt.show_wirescan_emittance:
emittance_bws_df = _read_tfs(opt.show_wirescan_emittance, timespan_dt)
else:
emittance_bws_df = None
return intensity_df, emittance_df, emittance_bws_df
def _read_tfs(tfs_file_or_path, timespan):
"""Read previously gathered data (see :meth:`pylhc.forced_da_analysis._write_tfs`)."""
try:
tfs_df = tfs.read_tfs(tfs_file_or_path, index=TIME_COLUMN)
except IOError:
tfs_df = tfs_file_or_path # hopefully
tfs_df.index = _convert_time_index(tfs_df.index)
return tfs_df.loc[slice(*timespan), :]
def _filter_emittance_data(df, planes, window_length, limit):
"""Cleans emittance data via outlier filter and moving average."""
for plane in planes:
LOG.debug(f"Filtering emittance data in plane {plane}.")
col_nemittance = column_norm_emittance(plane)
# col_err_nemittance = err_col(col_nemittance)
col_mean = mean_col(col_nemittance)
col_err_mean = err_col(col_mean)
mav, std, mask = clean_outliers_moving_average(
df[col_nemittance], length=window_length, limit=limit
)
df[col_mean] = mav
df[col_err_mean] = std
# if any(df[col_err_nemittance]):
# df[col_err_mean] = _rolling_errors(df[col_err_nemittance], ~mask, window_length)
df = df.dropna(axis="index")
if len(df.index) == 0:
raise IndexError("Not enough emittance data extracted. Try to give a fill number.")
df.headers[HEADER_BSRT_ROLLING_WINDOW] = window_length
df.headers[HEADER_BSRT_OUTLIER_LIMIT] = limit
df = _maybe_add_sum_for_planes(df, planes, column_norm_emittance)
df = _maybe_add_sum_for_planes(
df,
planes,
lambda p: mean_col(column_norm_emittance(p)),
lambda p: err_col(mean_col(column_norm_emittance(p))),
)
return df
# Timber Data ------------------------------------------------------------------
def _get_db(opt):
"""Get the database either presaved or from timber."""
db = None
if opt.pagestore_db:
db = opt.pagestore_db
try:
db_path = Path(db)
except TypeError:
pass
else:
LOG.debug(f"Loading database from file {str(db_path)}")
db = PageStore(f"file:{str(db_path)}", str(db_path.with_suffix("")))
if opt.fill is not None:
raise EnvironmentError("'fill' can't be used with pagestore database.")
else:
LOG.debug(f" Trying to load database from timber.")
try:
db = pytimber.LoggingDB(source=opt["timber_db"])
except AttributeError:
LOG.debug(f" Loading from timber failed.")
if not db:
error_msg = ""
if opt.fill is not None:
error_msg += "'fill' is given, "
if opt.emittance_tfs is None:
error_msg += "'emittance_tfs' is not given, "
if opt.intensity_tfs is None:
error_msg += "'intensity_tfs' is not given, "
if opt.show_wirescan_emittance is True:
error_msg += "wirescan emittance is requested, "
if len(error_msg):
error_msg += (
"but there is no database given and no access to timber databases. Aborting."
)
raise EnvironmentError(error_msg)
return db
def _get_fill_times(db, fill):
"""Extract Fill times from database."""
LOG.debug(f"Getting Timespan from fill {fill}")
filldata = db.getLHCFillData(fill)
return filldata["startTime"], filldata["endTime"]
def _get_bctrf_beam_intensity_from_timber(beam, db, timespan):
LOG.debug(f"Getting beam intensity from bctfr for beam {beam}.")
intensity_key = INTENSITY_KEY.format(beam=beam)
LOG.debug(f" Key: {intensity_key}")
x, y = db.get(intensity_key, *timespan)[intensity_key]
df = tfs.TfsDataFrame(
data=y, index=_timestamp_to_cerntime_index(x), columns=[INTENSITY], dtype=float
)
df = _drop_duplicate_indices(df)
LOG.debug(f" Returning dataframe of shape {df.shape}")
return df
def _get_bsrt_bunch_emittances_from_timber(beam, planes, db, timespan, key_type, nominal_emittance):
dfs = {p: None for p in planes}
for plane in planes:
LOG.debug(f"Getting emittance from BSRT for beam {beam} and plane {plane}.")
bunch_emittance_key = bsrt_emittance_key(beam, plane, key_type)
LOG.debug(f" Key: {bunch_emittance_key}")
col_nemittance = column_norm_emittance(plane)
all_columns = [f(col_nemittance) for f in (lambda s: s, mean_col, err_col)] + [
err_col(mean_col(col_nemittance))
]
x, y = db.get(bunch_emittance_key, *timespan)[bunch_emittance_key]
y_std = np.zeros_like(x)
if key_type == "fit_sigma":
# add all data with the same timestamp
y_new = defaultdict(list)
for x_elem, y_elem in zip(x, y):
y_new[f"{x_elem:.3f}"] += y_elem.tolist()
# get average and std per timestamp
x = np.array([float(elem) for elem in y_new.keys()])
y = np.array([np.average(elem) for elem in y_new.values()]) * nominal_emittance
y_std = np.array([np.std(elem) for elem in y_new.values()]) * nominal_emittance
elif key_type == "average":
y *= BSRT_EMITTANCE_TO_METER
y_std *= BSRT_EMITTANCE_TO_METER
# remove entries with zero emittance as unphysical
x, y, y_std = x[y != 0], y[y != 0], y_std[y != 0]
df = tfs.TfsDataFrame(
index=_timestamp_to_cerntime_index(x), columns=all_columns, dtype=float,
)
df[col_nemittance] = y
df[err_col(col_nemittance)] = y_std
dfs[plane] = df
df = _merge_df_planes(dfs, planes)
LOG.debug(f" Returning dataframe of shape {df.shape}")
return df
def _get_bws_emittances_from_timber(beam, planes, db, timespan):
dfs = {p: None for p in planes}
for plane in planes:
LOG.debug(f"Getting emittance from BWS for beam {beam} and plane {plane}.")
all_columns = [column_bws_norm_emittance(plane, d) for d in BWS_DIRECTIONS]
df = None
for direction in BWS_DIRECTIONS:
emittance_key = bws_emittance_key(beam, plane, direction)
LOG.debug(f" Key: {emittance_key}")
column_nemittance = column_bws_norm_emittance(plane, direction)
x, y = db.get(emittance_key, *timespan)[emittance_key]
if df is None:
df = tfs.TfsDataFrame(
index=_timestamp_to_cerntime_index(x), columns=all_columns, dtype=float
)
df[column_nemittance] = y * BWS_EMITTANCE_TO_METER
df[column_nemittance] = df[column_nemittance].apply(
np.mean
) # BWS can give multiple values
df[err_col(column_nemittance)] = df[column_nemittance].apply(
np.std
) # BWS can give multiple values
dfs[plane] = df
df = _merge_df_planes(dfs, planes)
for direction in BWS_DIRECTIONS:
df = _maybe_add_sum_for_planes(
df,
planes,
lambda p: column_bws_norm_emittance(p, direction),
lambda p: err_col(column_bws_norm_emittance(p, direction)),
)
LOG.debug(f" Returning dataframe of shape {df.shape}")
return df
# Kick Data --------------------------------------------------------------------
def _get_kick_df(kick_dir, plane):
def column_action_error(x):
return err_col(column_action(x))
try:
df = _get_new_kick_file(kick_dir, plane)
except FileNotFoundError:
LOG.debug("Reading of kickfile failed. Looking for old kickfile.")
df = _get_old_kick_file(kick_dir, plane)
df = _maybe_add_sum_for_planes(df, plane, column_action, column_action_error)
return df[[column_action(plane), column_action_error(plane)]]
def _get_old_kick_file(kick_dir, plane):
"""Kick files from ``Beta-Beat.src``."""
path = kick_dir / "getkickac.out"
LOG.debug(f"Reading kickfile '{str(path)}'.'")
df = tfs.read(path)
df = df.set_index(TIME_COLUMN)
df.index = _convert_time_index(df.index, path)
rename_dict = {}
for p in plane: # can be XY
rename_dict.update(
{
f"2J{p}RES": column_action(p),
f"2J{p}STDRES": err_col(column_action(p)),
f"J{p}2": column_action(p), # pre 2017
f"J{p}STD": err_col(column_action(p)), # pre 2017
}
)
df = df.rename(rename_dict, axis="columns")
renamed_cols = list(set(rename_dict.values()))
df.loc[:, renamed_cols] = df.loc[:, renamed_cols] * 1e-6
return df
def _get_new_kick_file(kick_dir, planes):
"""Kick files from ``omc3``."""
dfs = {p: None for p in planes}
for plane in planes:
path = kick_dir / f"{KICKFILE}_{plane.lower()}{TFS_SUFFIX}"
LOG.debug(f"Reading kickfile '{str(path)}'.'")
df = tfs.read(path, index=TIME_COLUMN)
df.index = pd.Index([CERNDatetime.from_cern_utc_string(t) for t in df.index], dtype=object)
dfs[plane] = df
return _merge_df_planes(dfs, planes)
def _get_output_dir(kick_directory, output_directory):
kick_path = Path(kick_directory)
if output_directory:
output_path = Path(output_directory)
else:
output_path = kick_path / RESULTS_DIR
try:
output_path.mkdir(exist_ok=True)
except PermissionError:
LOG.warn(
f"You have no writing permission in '{str(output_path)}', "
f"output data might not be created."
)
LOG.info(f"All output will be written to {str(output_path)}")
return kick_path, output_path
# Intensity at Kicks -----------------------------------------------------------
def _add_intensity_and_losses_to_kicks(kick_df, intensity_df, time_before, time_after):
LOG.debug("Calculating intensity and losses for the kicks.")
col_list = [INTENSITY_BEFORE, INTENSITY_AFTER, INTENSITY_LOSSES]
new_columns = [col for col in col_list + [err_col(c) for c in col_list]]
kick_df = kick_df.reindex(columns=kick_df.columns.tolist() + new_columns)
kick_df = _get_intensities_around_kicks(kick_df, intensity_df, time_before, time_after)
kick_df = _calculate_intensity_losses_at_kicks(kick_df)
return kick_df
def _get_intensities_around_kicks(kick_df, intensity_df, time_before, time_after):
LOG.debug("Calculating beam intensity before and after kicks.")
# input signs and order does not matter
time_before = sorted(-np.abs(t) for t in time_before)
time_after = sorted(np.abs(t) for t in time_after)
kick_df.headers[HEADER_TIME_BEFORE] = str(time_before)
kick_df.headers[HEADER_TIME_AFTER] = str(time_after)
for i, time in enumerate(kick_df.index):
# calculate intensity before and after kicks (with error)
for column, time_delta in ((INTENSITY_BEFORE, time_before), (INTENSITY_AFTER, time_after)):
t_from, t_to = (
time + pd.Timedelta(seconds=time_delta[0]),
time + pd.Timedelta(seconds=time_delta[1]),
)
data = intensity_df.loc[
t_from:t_to, INTENSITY
] # awesome pandas can handle time intervals!
kick_df.loc[time, [column, err_col(column)]] = data.mean(), data.std()
return kick_df
def _calculate_intensity_losses_at_kicks(kick_df):
LOG.debug("Calculating intensity losses.")
# absolute losses
kick_df[INTENSITY_LOSSES] = kick_df[INTENSITY_BEFORE] - kick_df[INTENSITY_AFTER]
kick_df[err_col(INTENSITY_LOSSES)] = np.sqrt(
np.square(kick_df[err_col(INTENSITY_BEFORE)]) + np.square(kick_df[err_col(INTENSITY_AFTER)])
)
# relative losses, error from error-propagation formula for losses / I_before = 1 - I_after / I_before
kick_df[rel_col(INTENSITY_LOSSES)] = kick_df[INTENSITY_LOSSES] / kick_df[INTENSITY_BEFORE]
kick_df[rel_col(err_col(INTENSITY_LOSSES))] = np.sqrt(
np.square(kick_df[INTENSITY_AFTER] / kick_df[INTENSITY_BEFORE])
* (
np.square(kick_df[err_col(INTENSITY_AFTER)] / kick_df[INTENSITY_AFTER])
+ np.square(kick_df[err_col(INTENSITY_BEFORE)] / kick_df[INTENSITY_BEFORE])
)
)
return kick_df
# Emittance at Kicks -----------------------------------------------------------
def _add_emittance_to_kicks(plane, energy, kick_df, emittance_df, nominal):
LOG.debug("Retrieving normalized emittance at the kicks.")
kick_df.headers[HEADER_ENERGY] = energy
kick_df.headers[HEADER_BSRT_ROLLING_WINDOW] = ROLLING_AVERAGE_WINDOW
col_nemittance = column_norm_emittance(plane)
cols_emitt = [mean_col(col_nemittance), err_col(mean_col(col_nemittance))]
cols_kick = [col_nemittance, err_col(col_nemittance)]
kick_df = kick_df.reindex(columns=kick_df.columns.tolist() + cols_kick)
idx_emitt = [emittance_df.columns.get_loc(c) for c in cols_emitt]
for time in kick_df.index:
idx_kick = emittance_df.index.get_loc(time, method="nearest")
kick_df.loc[time, cols_kick] = emittance_df.iloc[idx_kick, idx_emitt].values
# add de-normalized emittance
normalization = get_proton_gamma(energy) * get_proton_beta(
energy
) # norm emittance to emittance
col_emittance = column_emittance(plane)
kick_df.headers[header_norm_nominal_emittance(plane)] = nominal
kick_df.headers[header_nominal_emittance(plane)] = nominal / normalization
kick_df[col_emittance] = kick_df[col_nemittance] / normalization
kick_df[err_col(col_emittance)] = kick_df[err_col(col_nemittance)] / normalization
return kick_df
# Forced DA Fitting ------------------------------------------------------------
def fun_exp_decay(p, x): # fit and plot
"""sp = DA_J, x[0] = action (2J res), x[1] = emittance"""
return np.exp(-(p - (0.5 * x[0])) / x[1])
def fun_exp_sigma(p, x): # only used for plotting
"""p = DA_sigma, x = action (J_sigma)"""
return np.exp(-0.5 * (p ** 2 - x ** 2))
def fun_linear(p, x): # fit and plot
"""p = DA_J, x = action (2J res)"""
return x * 0.5 - p
def swap_fun_parameters(fun):
"""Parameter swapped for Curvefit."""
return lambda x, p: fun(p, x)
def _do_fit(plane, kick_df, fit_type):
LOG.debug("Fitting forced da to exponential. ")
action, emittance, rel_losses = _get_fit_data(kick_df, plane)
init_guess = [INITIAL_DA_FIT * kick_df.headers[header_nominal_emittance(plane)]]
get_fit_param = {"linear": _linear_fit_parameters, "exponential": _exponential_fit_parameters}[
fit_type
]
fit_fun, x, y, sx, sy = get_fit_param(action, emittance, rel_losses)
# do prelim fit
init_fit, _ = _fit_curve(swap_fun_parameters(fit_fun), x, y, init_guess)
# do odr
odr = _fit_odr(fit_fun, x, y, sx, sy, init_fit)
# add DA to kick
da = odr.beta[0], odr.sd_beta[0]
kick_df.headers[header_da(plane)], kick_df.headers[header_da_error(plane)] = da
LOG.info(f"Forced DA (wrt. J) in {plane} [m]: {da[0]} ± {da[1]}")
return kick_df
def _get_fit_data(kick_df, plane):
"""Extracts necessary data from ``kick-df``. Returns tri-tuple of tuples (data, std)."""
col_action = column_action(plane)
col_emittance = column_emittance(plane)
col_losses = rel_col(INTENSITY_LOSSES)
# get data
action = kick_df[col_action], _no_nonzero_errors(kick_df[err_col(col_action)])
emittance = kick_df[col_emittance], _no_nonzero_errors(kick_df[err_col(col_emittance)])
rel_losses = kick_df[col_losses], _no_nonzero_errors(kick_df[err_col(col_losses)])
return action, emittance, rel_losses
def _exponential_fit_parameters(action, emittance, rel_losses):
"""Returns exponential fit function and parameters. All inputs are tuples of (data, std)."""
x = action[0], emittance[0]
y = rel_losses[0]
sx = [action[1], emittance[1]]
sy = rel_losses[1]
return fun_exp_decay, x, y, sx, sy
def _linear_fit_parameters(action, emittance, rel_losses):
"""
Returns linear fit function and parameters. All inputs are tuples of (data, std)."""
log_losses =
|
np.log(rel_losses[0])
|
numpy.log
|
# Copyright © VASP Software GmbH,
# Licensed under the Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import itertools
import numpy as np
from numpy.testing import assert_array_almost_equal_nulp
import pytest
import py4vasp.exceptions as exception
import py4vasp.raw as raw
number_steps = 4
number_atoms = 7
number_points = 50
number_bands = 3
single_spin = 1
two_spins = 2
axes = 3
complex_ = 2
class _Assert:
@staticmethod
def allclose(actual, desired):
if actual is None:
assert desired is None
else:
actual, desired = np.broadcast_arrays(actual, desired)
actual, mask_actual = _finite_subset(actual)
desired, mask_desired = _finite_subset(desired)
assert np.all(mask_actual == mask_desired)
assert_array_almost_equal_nulp(actual, desired, 10)
def _finite_subset(array):
array = np.atleast_1d(array)
mask = np.isfinite(array)
return array[mask], mask
@pytest.fixture(scope="session")
def Assert():
return _Assert
class RawDataFactory:
@staticmethod
def band(selection):
band, *options = selection.split()
options = options[0] if len(options) > 0 else None
if band == "single":
return _single_band(options)
elif band == "multiple":
return _multiple_bands(options)
elif band == "line":
return _line_band(options)
elif band == "spin_polarized":
return _spin_polarized_bands(options)
else:
raise exception.NotImplemented()
@staticmethod
def born_effective_charge(selection):
if selection == "Sr2TiO4":
return _Sr2TiO4_born_effective_charges()
else:
raise exception.NotImplemented()
@staticmethod
def density(selection):
return _Fe3O4_density(selection)
@staticmethod
def dielectric_function(selection):
if selection == "electron":
return _electron_dielectric_function()
elif selection == "ion":
return _ion_dielectric_function()
else:
raise exception.NotImplemented()
@staticmethod
def dielectric_tensor(selection):
return _dielectric_tensor(selection)
@staticmethod
def dos(selection):
structure, *projectors = selection.split()
projectors = projectors[0] if len(projectors) > 0 else "no_projectors"
if structure == "Sr2TiO4":
return _Sr2TiO4_dos(projectors)
elif structure == "Fe3O4":
return _Fe3O4_dos(projectors)
else:
raise exception.NotImplemented()
@staticmethod
def elastic_modulus(selection):
return _elastic_modulus()
@staticmethod
def energy(selection):
return _energy()
@staticmethod
def force_constant(selection):
if selection == "Sr2TiO4":
return _Sr2TiO4_force_constants()
else:
raise exception.NotImplemented()
@staticmethod
def force(selection):
if selection == "Sr2TiO4":
return _Sr2TiO4_forces()
elif selection == "Fe3O4":
return _Fe3O4_forces()
else:
raise exception.NotImplemented()
@staticmethod
def internal_strain(selection):
if selection == "Sr2TiO4":
return _Sr2TiO4_internal_strain()
else:
raise exception.NotImplemented()
@staticmethod
def kpoint(selection):
mode, *labels = selection.split()
labels = labels[0] if len(labels) > 0 else "no_labels"
if mode[0] in ["l", b"l"[0]]:
return _line_kpoints(mode, labels)
else:
return _grid_kpoints(mode, labels)
@staticmethod
def magnetism(selection):
return _magnetism(_number_components(selection))
@staticmethod
def pair_correlation(selection):
return _Sr2TiO4_pair_correlation()
@staticmethod
def piezoelectric_tensor(selection):
return _piezoelectric_tensor()
@staticmethod
def polarization(selection):
return _polarization()
@staticmethod
def projector(selection):
if selection == "Sr2TiO4":
return _Sr2TiO4_projectors()
elif selection == "Fe3O4":
return _Fe3O4_projectors()
else:
raise exception.NotImplemented()
@staticmethod
def stress(selection):
if selection == "Sr2TiO4":
return _Sr2TiO4_stress()
elif selection == "Fe3O4":
return _Fe3O4_stress()
else:
raise exception.NotImplemented()
@staticmethod
def structure(selection):
if selection == "Sr2TiO4":
return _Sr2TiO4_structure()
elif selection == "Fe3O4":
return _Fe3O4_structure()
else:
raise exception.NotImplemented()
@staticmethod
def topology(selection):
if selection == "Sr2TiO4":
return _Sr2TiO4_topology()
elif selection == "Fe3O4":
return _Fe3O4_topology()
else:
raise exception.NotImplemented()
@pytest.fixture
def raw_data():
return RawDataFactory
def _number_components(selection):
if selection == "collinear":
return 2
elif selection == "noncollinear":
return 4
elif selection == "charge_only":
return 1
else:
raise exception.NotImplemented()
def _electron_dielectric_function():
shape = (2, axes, axes, number_points, complex_)
data = np.linspace(0, 1, np.prod(shape)).reshape(shape)
return raw.RawDielectricFunction(
energies=np.linspace(0, 1, number_points),
density_density=data[0],
current_current=data[1],
ion=None,
)
def _ion_dielectric_function():
shape = (axes, axes, number_points, complex_)
return raw.RawDielectricFunction(
energies=np.linspace(0, 1, number_points),
density_density=None,
current_current=None,
ion=np.linspace(0, 1, np.prod(shape)).reshape(shape),
)
def _dielectric_tensor(method):
shape = (3, axes, axes)
data = np.arange(np.prod(shape)).reshape(shape)
return raw.RawDielectricTensor(
electron=data[0],
ion=data[1],
independent_particle=data[2] if method in ("dft", "rpa") else None,
method=method.encode(),
)
def _elastic_modulus():
shape = (2, axes, axes, axes, axes)
data = np.arange(np.prod(shape)).reshape(shape)
return raw.RawElasticModulus(
clamped_ion=data[0],
relaxed_ion=data[1],
)
def _Sr2TiO4_pair_correlation():
labels = ("total", "Sr~Sr", "Sr~Ti", "Sr~O", "Ti~Ti", "Ti~O", "O~O")
shape = (number_steps, len(labels), number_points)
data = np.arange(np.prod(shape)).reshape(shape)
return raw.PairCorrelation(
distances=np.arange(number_points),
function=data,
labels=labels,
)
def _piezoelectric_tensor():
shape = (2, axes, axes, axes)
data = np.arange(np.prod(shape)).reshape(shape)
return raw.RawPiezoelectricTensor(
electron=data[0],
ion=data[1],
)
def _polarization():
return raw.RawPolarization(electron=np.array((1, 2, 3)), ion=np.array((4, 5, 6)))
def _energy():
labels = ("ion-electron TOTEN ", "kinetic energy EKIN", "temperature TEIN")
labels = np.array(labels, dtype="S")
shape = (number_steps, len(labels))
return raw.RawEnergy(
labels=labels,
values=np.arange(np.prod(shape)).reshape(shape),
)
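# Build an explicit k-point path of four five-point segments (Gamma-A, A-M, Gamma-Y, Y-M)
# for the band-structure test data; labels and label indices are attached only when requested.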
def _line_kpoints(mode, labels):
line_length = 5
GM = [0, 0, 0]
Y = [0.5, 0.5, 0.0]
A = [0, 0, 0.5]
M = [0.5, 0.5, 0.5]
coordinates = (
np.linspace(GM, A, line_length),
np.linspace(A, M, line_length),
np.linspace(GM, Y, line_length),
np.linspace(Y, M, line_length),
)
kpoints = raw.RawKpoint(
mode=mode,
number=line_length,
coordinates=np.concatenate(coordinates),
weights=np.ones(len(coordinates)),
cell=_Sr2TiO4_cell(),
)
if labels == "with_labels":
kpoints.labels = [r"$\Gamma$", " M ", r"$\Gamma$", "Y", "M"]
kpoints.label_indices = [1, 4, 5, 7, 8]
return kpoints
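# Build a regular 4x3x4 k-point grid (shifted along z); the number of explicitly listed
# k-points is only set when the mode marks the grid as explicit.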
def _grid_kpoints(mode, labels):
x = np.linspace(0, 1, 4, endpoint=False)
y = np.linspace(0, 1, 3, endpoint=False)
z = np.linspace(0, 1, 4, endpoint=False) + 1 / 8
coordinates = np.array(list(itertools.product(x, y, z)))
number_kpoints = len(coordinates) if mode[0] in ["e", b"e"[0]] else 0
kpoints = raw.RawKpoint(
mode=mode,
number=number_kpoints,
coordinates=coordinates,
weights=np.arange(len(coordinates)),
cell=_Sr2TiO4_cell(),
)
if labels == "with_labels":
kpoints.labels = ["foo", b"bar", "baz"]
kpoints.label_indices = [9, 25, 40]
return kpoints
def _magnetism(number_components):
lmax = 3
shape = (number_steps, number_components, number_atoms, lmax)
return raw.RawMagnetism(
structure=_Fe3O4_structure(), moments=np.arange(np.prod(shape)).reshape(shape)
)
def _single_band(projectors):
kpoints = _grid_kpoints("explicit", "no_labels")
return raw.RawBand(
fermi_energy=0.0,
eigenvalues=np.array([np.linspace([0], [1], len(kpoints.coordinates))]),
occupations=np.array([np.linspace([1], [0], len(kpoints.coordinates))]),
kpoints=kpoints,
)
def _multiple_bands(projectors):
kpoints = _grid_kpoints("explicit", "no_labels")
shape = (single_spin, len(kpoints.coordinates), number_bands)
raw_band = raw.RawBand(
fermi_energy=0.5,
eigenvalues=np.arange(np.prod(shape)).reshape(shape),
occupations=np.arange(np.prod(shape)).reshape(shape),
kpoints=kpoints,
)
if projectors == "with_projectors":
raw_band.projectors = _Sr2TiO4_projectors()
number_orbitals = len(raw_band.projectors.orbital_types)
shape = (single_spin, number_atoms, number_orbitals, *shape[1:])
raw_band.projections = np.random.random(shape)
return raw_band
def _line_band(labels):
kpoints = _line_kpoints("line", labels)
shape = (single_spin, len(kpoints.coordinates), number_bands)
return raw.RawBand(
fermi_energy=0.5,
eigenvalues=np.arange(np.prod(shape)).reshape(shape),
occupations=np.arange(np.prod(shape)).reshape(shape),
kpoints=kpoints,
)
def _spin_polarized_bands(projectors):
kpoints = _grid_kpoints("explicit", "no_labels")
kpoints.cell = _Fe3O4_cell()
shape = (two_spins, len(kpoints.coordinates), number_bands)
raw_band = raw.RawBand(
fermi_energy=0.0,
eigenvalues=np.arange(np.prod(shape)).reshape(shape),
occupations=np.arange(np.prod(shape)).reshape(shape),
kpoints=kpoints,
)
if projectors in ["with_projectors", "excess_orbitals"]:
raw_band.projectors = _Fe3O4_projectors()
number_orbitals = len(raw_band.projectors.orbital_types)
shape = (two_spins, number_atoms, number_orbitals, *shape[1:])
raw_band.projections = np.random.random(shape)
if projectors == "excess_orbitals":
old_orbitals = raw_band.projectors.orbital_types
new_orbitals = np.array(["g", "h", "i"], dtype="S")
expanded_orbital_types = np.concatenate((old_orbitals, new_orbitals))
raw_band.projectors.orbital_types = expanded_orbital_types
return raw_band
def _Sr2TiO4_born_effective_charges():
shape = (number_atoms, axes, axes)
return raw.RawBornEffectiveCharge(
structure=_Sr2TiO4_structure(),
charge_tensors=np.arange(np.prod(shape)).reshape(shape),
)
def _Sr2TiO4_cell():
scale = 6.9229
lattice_vectors = [
[1.0, 0.0, 0.0],
[0.678112209738693, 0.734958387251008, 0.0],
[-0.839055341042049, -0.367478859090843, 0.401180037874301],
]
return raw.RawCell(
lattice_vectors=scale * np.array(number_steps * [lattice_vectors]),
scale=scale,
)
def _Sr2TiO4_dos(projectors):
energies = np.linspace(-1, 3, number_points)
raw_dos = raw.RawDos(
fermi_energy=1.372,
energies=energies,
dos=np.array([energies ** 2]),
)
if projectors == "with_projectors":
raw_dos.projectors = _Sr2TiO4_projectors()
number_orbitals = len(raw_dos.projectors.orbital_types)
shape = (single_spin, number_atoms, number_orbitals, number_points)
raw_dos.projections = np.random.random(shape)
return raw_dos
def _Sr2TiO4_force_constants():
shape = (axes * number_atoms, axes * number_atoms)
return raw.RawForceConstant(
structure=_Sr2TiO4_structure(),
force_constants=np.arange(np.prod(shape)).reshape(shape),
)
def _Sr2TiO4_forces():
shape = (number_steps, number_atoms, axes)
return raw.RawForce(
structure=_Sr2TiO4_structure(),
forces=np.arange(np.prod(shape)).reshape(shape),
)
def _Sr2TiO4_internal_strain():
shape = (number_atoms, axes, axes, axes)
return raw.RawInternalStrain(
structure=_Sr2TiO4_structure(),
internal_strain=np.arange(np.prod(shape)).reshape(shape),
)
def _Sr2TiO4_projectors():
orbital_types = "s py pz px dxy dyz dz2 dxz x2-y2 fy3x2 fxyz fyz2 fz3 fxz2 fzx2 fx3"
return raw.RawProjector(
topology=_Sr2TiO4_topology(),
orbital_types=np.array(orbital_types.split(), dtype="S"),
number_spins=1,
)
def _Sr2TiO4_stress():
shape = (number_steps, axes, axes)
return raw.RawStress(
structure=_Sr2TiO4_structure(),
stress=np.arange(np.prod(shape)).reshape(shape),
)
def _Sr2TiO4_structure():
repetitions = (number_steps, 1, 1)
positions = [
[0.64529, 0.64529, 0.0],
[0.35471, 0.35471, 0.0],
[0.00000, 0.00000, 0.0],
[0.84178, 0.84178, 0.0],
[0.15823, 0.15823, 0.0],
[0.50000, 0.00000, 0.5],
[0.00000, 0.50000, 0.5],
]
return raw.RawStructure(
topology=_Sr2TiO4_topology(),
cell=_Sr2TiO4_cell(),
positions=np.tile(positions, repetitions),
)
def _Sr2TiO4_topology():
return raw.RawTopology(
number_ion_types=np.array((2, 1, 4)),
ion_types=np.array(("Sr", "Ti", "O "), dtype="S"),
)
def _Fe3O4_cell():
lattice_vectors = [
[5.1427, 0.0, 0.0],
[0.0, 3.0588, 0.0],
[-1.3633791448, 0.0, 5.0446102592],
]
scaling = np.linspace(0.98, 1.01, number_steps)
return raw.RawCell(lattice_vectors=np.multiply.outer(scaling, lattice_vectors))
def _Fe3O4_density(selection):
parts = selection.split()
structure = RawDataFactory.structure(parts[0])
grid = (_number_components(parts[1]), 10, 12, 14)
return raw.RawDensity(
structure=structure,
        charge=np.arange(np.prod(grid)).reshape(grid),
    )
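
# --- Separate script: Raspberry Pi camera colour tracking with OpenCV (HSV thresholding) ---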
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import numpy as np
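# 5x5 structuring element for morphological operations (e.g. the closing shown in the 'closing' window)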
kernel = np.ones((5,5),np.uint8)
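# Tuning constants (Spanish identifiers kept as-is): ESCALA = scale factor, RESOLUCION = capture
# resolution, FORMATO = capture format, COLOR_CUADRADO / COLOR_CIRCULO = rectangle / circle draw colours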
ESCALA = 1.6
FLAGS = 100
RESOLUCION = (360, 270)
FORMATO = "bgr"
BREAK = 100
FRAMERATE = 15
COLOR_CUADRADO = (0,255,0)
COLOR_CIRCULO = (255,0,0)
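# No-op callback required by cv2.createTrackbar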
def nothing(x):
pass
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = RESOLUCION
camera.framerate = FRAMERATE
camera.rotation = 180
camera.video_denoise = False
camera.image_effect = 'none'
camera.meter_mode = 'spot'
rawCapture = PiRGBArray(camera, size=RESOLUCION)
# Create the display windows for later use
cv2.namedWindow('HueComp')
cv2.namedWindow('SatComp')
cv2.namedWindow('ValComp')
cv2.namedWindow('closing')
cv2.namedWindow('tracking')
# Create track bars for the min and max of hue, saturation and value
# You can adjust the defaults as you like
cv2.createTrackbar('hmin', 'HueComp',12,179,nothing)
cv2.createTrackbar('hmax', 'HueComp',37,179,nothing)
cv2.createTrackbar('smin', 'SatComp',96,255,nothing)
cv2.createTrackbar('smax', 'SatComp',255,255,nothing)
cv2.createTrackbar('vmin', 'ValComp',186,255,nothing)
cv2.createTrackbar('vmax', 'ValComp',255,255,nothing)
# allow the camera to warmup
time.sleep(0.1)
# capture frames from the camera
for image in camera.capture_continuous(rawCapture, format=FORMATO, use_video_port=True):
    # grab the raw NumPy array representing the current frame
frame = image.array
hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
hue,sat,val = cv2.split(hsv)
    # get the values from the track bars and apply them to the result
hmn = cv2.getTrackbarPos('hmin','HueComp')
hmx = cv2.getTrackbarPos('hmax','HueComp')
smn = cv2.getTrackbarPos('smin','SatComp')
smx = cv2.getTrackbarPos('smax','SatComp')
vmn = cv2.getTrackbarPos('vmin','ValComp')
vmx = cv2.getTrackbarPos('vmax','ValComp')
# Apply thresholding
hthresh = cv2.inRange(np.array(hue),np.array(hmn),np.array(hmx))
    sthresh = cv2.inRange(np.array(sat),np.array(smn),np.array(smx))