###Determining if news is "fake" via NLP & a NN (99% Accuracy)
###Dataset: https://www.kaggle.com/clmentbisaillon/fake-and-real-news-dataset
#Base package imports
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('darkgrid')
from bs4 import BeautifulSoup #for pulling data out of html and xml files
import re, string,unicodedata
from string import punctuation
#Natural language toolkit imports
import nltk
from nltk.corpus import stopwords #words to be ignored
nltk.download('stopwords')
from nltk.stem.porter import PorterStemmer #used to reduce word variants to their root/stem
from nltk.stem import WordNetLemmatizer #used to find the root form (lemma) of a word variant
from nltk.tokenize import word_tokenize,sent_tokenize #strips strings into tokens
#tokenizing text represents every word with a number and converts all characters to lower case
from nltk.tokenize.toktok import ToktokTokenizer #tokenizes final period only
from nltk import pos_tag #tags words
from nltk.corpus import wordnet #English language database
#Tensorflow & Keras imports
import tensorflow as tf
import keras
from keras.preprocessing import text, sequence
from keras.models import Sequential
from keras.layers import Dense,Embedding,LSTM,Dropout
from keras.callbacks import ReduceLROnPlateau
#plain stack of layers, each layer has one input and one output tensor (Sequential)
#regular deeply connected nn layer (Dense)
#a dense vector rep for words (Embedding)
#in this case a vector represents the projection of the word into a continuous vector space
#the position of the word in the learned vector space is its embedding
#lstm layer (long short-term memory layer) chooses different implementations
#to maximize performance of the NN
#dropout layer helps prevent overfitting by randomly setting input units to 0 at each step during training
#ReduceLROnPlateau reduces the learning rate when the metric has stopped improving
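#A minimal sketch (not this notebook's exact model) of how these layers are commonly
#assembled for binary text classification; the layer sizes and the max_features/embed_size
#defaults below are assumptions, not values taken from this notebook.
def build_example_model(max_features=10000, embed_size=100):
    model = Sequential()
    model.add(Embedding(max_features, embed_size))  #word index -> dense vector
    model.add(LSTM(128))                            #sequence -> fixed-size representation
    model.add(Dropout(0.2))                         #regularization against overfitting
    model.add(Dense(1, activation='sigmoid'))       #probability that the article is real
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model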
#Sklearn imports
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
from sklearn.model_selection import train_test_split
#Loading in datasets
true = pd.read_csv("True.csv")
false = pd.read_csv("Fake.csv")
#Taking a look at the data
#True data
true.head()
#False data
false.head()
#Visualizing breakdown by subject (real news)
plt.figure(figsize=(20,10))
sns.countplot(x='subject',data=true)
plt.show()
print(true.subject.value_counts())
#Visualizing breakdown by subject (fake news)
plt.figure(figsize=(20,10))
sns.countplot(x='subject',data=false)
plt.show()
print(false.subject.value_counts())
#Merging the two datasets together
true['category'] = 1
false['category'] = 0
df = pd.concat([true,false])
df.head()
#Visualizing breakdown by category (real vs. fake)
plt.figure(figsize=(20,10))
sns.countplot(x='category',data=df)
plt.show()
#Checking for missing values
df.isna().sum()
#The subject topics differ between the two categories, so we drop subject (and fold title into text)
df['text'] = df['text'] + " " + df['title']
del df['title']
del df['subject']
del df['date']
#Setting stopwords (English)
stop_words = set(stopwords.words('english'))
punctuation = list(string.punctuation)
stop_words.update(punctuation)
#Cleaning the data
#Defining a function to strip html format text
def html_strip(text):
soup = BeautifulSoup(text,'html.parser') #using BeautifulSoup parser
return soup.get_text()
#Defining a function to strip text between square brackets
def square_bracket_strip(text):
return re.sub(r'\[[^]]*\]','',text)
#Defining a function to strip url formatted text
def url_strip(text):
return re.sub(r'http\S+', '', text)
#Defining a function to identify and remove stopwords
def remove_stopwords(text):
final_text = []
for i in text.split():
if i.strip().lower() not in stop_words:
final_text.append(i.strip())
return " ".join(final_text)
#Defining a function to remove any noise in the text
def remove_noise(text):
text = html_strip(text)
text = square_bracket_strip(text)
text = remove_stopwords(text)
return text
df['text'] = df['text'].apply(remove_noise)
#Performing train-validation split
X_train, X_val, y_train, y_val = train_test_split(df.text, df.category, random_state = 7,test_size = 0.30)
#Tokenizing text data
#Defining tokenizer (max_features and maxlen are not defined in this snippet;
#the values below are assumed, typical choices)
max_features = 10000
maxlen = 300
tokenizer = text.Tokenizer(num_words = max_features)
#Tokenizing X_train
tokenizer.fit_on_texts(X_train)
tokenized_train = tokenizer.texts_to_sequences(X_train)
X_train = sequence.pad_sequences(tokenized_train,maxlen = maxlen)
#Tokenizing X_val
tokenized_test = tokenizer.texts_to_sequences(X_val)
X_val = sequence.pad_sequences(tokenized_test, maxlen=maxlen)
#Using the GloVe method, i.e. Global Vectors for word representation
#GloVe derives semantic relationships between words from the co-occurrence matrix
#We need three words at a time to measure semantic similarity between words,
#i.e. P(k|ice), P(k|steam) and the ratio P(k|ice)/P(k|steam)
#Using pre-trained GloVe vectors (glove.6B, trained on Wikipedia + Gigaword) from: https://nlp.stanford.edu/projects/glove/
embedding_file = 'glove.6B.100d.txt'
#Defining a function to create an array of word coefs
def get_coefs(word, *arr):
return word, np.asarray(arr, dtype='float32')
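#Hedged sketch of the usual next step (cut off in the snippet above): build a
#word -> vector index from the GloVe file and an embedding matrix aligned with
#the tokenizer's word_index; embed_size = 100 matches glove.6B.100d.txt
embedding_index = dict(get_coefs(*line.rstrip().split(" ")) for line in open(embedding_file, encoding="utf8"))
embed_size = 100
word_index = tokenizer.word_index
nb_words = min(max_features, len(word_index) + 1)
embedding_matrix = np.zeros((nb_words, embed_size))
for word, i in word_index.items():
    if i >= nb_words:
        continue
    vector = embedding_index.get(word)
    if vector is not None:
        embedding_matrix[i] = vector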
from camera import CAMERA
from yolo_model import BoundBox, YOLO
from utils.bbox import bbox_iou
from lane import LaneLineFinder, get_center_shift, get_curvature
import numpy as np
import cv2
from datetime import datetime
from PIL import Image
# yolo_detector = YOLO(score = 0.3, iou = 0.5, gpu_num = 0)
WHITE = (255, 255, 255)
YELLOW = (66, 244, 238)
GREEN = (80, 220, 60)
LIGHT_CYAN = (255, 255, 224)
DARK_BLUE = (139, 0, 0)
GRAY = (128, 128, 128)
RED = (0,0,255)
ORANGE =(0,165,255)
vehicles = [1,2,3,5,6,7,8]
animals =[15,16,17,18,19,21,22,23,]
humans =[0]
obstructions = humans + animals + vehicles
classes = [#
'Ped','bicycle','car','motorbike','aeroplane','bus',\
'train','truck','boat','traffic light','fire hydrant','stop sign',\
'parking meter','bench','bird','cat','dog','horse',\
'sheep','cow','elephant', 'bear','zebra','giraffe',\
'backpack','umbrella','handbag','tie','suitcase','frisbee',\
'skis','snowboard','sports ball','kite','baseball bat',\
'baseball glove','skateboard','surfboard','tennis racket','bottle','wine glass',\
'cup','fork','knife','spoon','bowl','banana',\
'apple','sandwich','orange','broccoli','carrot','hot dog',\
'pizza','donut','cake','chair','sofa','pottedplant',\
'bed','diningtable','toilet','tvmonitor','laptop','mouse',\
'remote','keyboard','cell phone','microwave','oven','toaster',\
'sink','refrigerator','book','clock','vase','scissors',\
'teddy bear','hair drier','toothbrush' ]
class OBSTACLE(BoundBox):
xmax :int
xmin :int
ymin :int
ymax :int
xmid :int
ymid :int
lane : str
tracker = None
position : [int,int]
PERIOD = 5
__count = 0
def __init__(self,box: BoundBox,dst, _id) :
self.col_time:float =999.0
self._id = _id
self.update_coord(box)
self.update_score(box)
self.history : np.ndarray = []
self.position_hist = []
self.velocity = np.zeros((2))
self.position = dst
self.score=box.score
self.label = box.label
def update_obstacle(self, box: BoundBox, dst, fps) :
self.position_hist.append((self.xmin, self.ymin, self.xmax,self.ymax))
self.update_coord(box)
old_loc = self.position
self.history.append(old_loc)
self.col_time = min(dst[1]/(self.velocity[1]+0.001),99)
if self.__count % self.PERIOD == 0 :
self.velocity = (old_loc-dst ) * fps/self.PERIOD
self.__count += 1
def update_coord(self,box):
self.xmax = box.xmax
self.xmin = box.xmin
self.ymin = box.ymin
self.ymax = box.ymax
self.xmid = int((box.xmax+box.xmin)/2)
self.ymid = int((box.ymax+box.ymin)/2)
def update_score(self,box):
self.score=box.score
self.label = box.label
def update_box(self,box):
self.update_coord(box)
self.update_score(box)
class TRAFFIC_LIGHTS(OBSTACLE) :
def __init__(self) :
return None
def detect_status(self):
return None
class TRAFFIC_SIGNS(OBSTACLE):
def __init__(self) :
return None
def decipher(self):
return None
class FRAME :
fps:float
UNWARPED_SIZE :(int,int)
LANE_WIDTH :int
WRAPPED_WIDTH : int
camera : CAMERA
yolo : classmethod
PERSP_PERIOD = 100000
YOLO_PERIOD = 0.5 # SECONDS
_defaults = {
"id": 0,
"first": True,
"speed": 0,
"n_objects" :0,
"camera" : CAMERA(),
"image" : [],
"LANE_WIDTH" : 3.66,
"fps" :22
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
# calc pers => detect cars and dist > detect lanes
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.speed = self.get_speed()
### IMAGE PROPERTIES
self.image : np.ndarray
if self.image.size ==0 :
raise ValueError("No Image")
self.temp_dir = './images/detection/'
self.size : (int , int) = (self.image.shape[0] , self.image.shape[1] )
self.UNWARPED_SIZE = (int(self.size[0]),int(self.size[1]*.4))
self.WRAPPED_WIDTH = int(self.UNWARPED_SIZE[0]*1.25)
self.trans_mat = None
self.inv_trans_mat = None
self.pixels_per_meter = [0,0]
self.perspective_done_at = 0
self.img_shp = (self.image.shape[1], self.image.shape[0] )
self.area = self.img_shp[0]*self.img_shp[1]
# self.image = self.camera.undistort(self.image)
### OBJECT DETECTION AND TRACKING
self.yolo = YOLO()
self.first_detect = True
self.obstacles :[OBSTACLE] =[]
self.__yp = int(self.YOLO_PERIOD*self.fps)
### LANE FINDER
self.lane_found = False
self.count = 0
self.mask = np.zeros((self.UNWARPED_SIZE[1], self.UNWARPED_SIZE[0], 3), dtype=np.uint8)
self.roi_mask = np.ones((self.UNWARPED_SIZE[1], self.UNWARPED_SIZE[0], 3), dtype=np.uint8)
self.total_mask = np.zeros_like(self.roi_mask)
self.warped_mask = np.zeros((self.UNWARPED_SIZE[1], self.UNWARPED_SIZE[0]), dtype=np.uint8)
self.lane_count = 0
self.left_line = LaneLineFinder(self.UNWARPED_SIZE, self.pixels_per_meter, -1.8288) # 6 feet in meters
self.right_line = LaneLineFinder(self.UNWARPED_SIZE, self.pixels_per_meter, 1.8288)
def perspective_tfm(self , pos) :
now = datetime.utcnow().timestamp()
if now - self.perspective_done_at > self.PERSP_PERIOD :
self.calc_perspective()
return cv2.perspectiveTransform(pos, self.trans_mat)
#cv2.warpPerspective(image, self.trans_mat, self.UNWARPED_SIZE)
def calc_perspective(self, verbose = True):
roi = np.zeros((self.size[0], self.size[1]), dtype=np.uint8) # 720 , 1280
roi_points = np.array([[0, self.size[0]],[self.size[1],self.size[0]],
[self.size[1]//2+100,-0*self.size[0]],
[self.size[1]//2-100,-0*self.size[0]]], dtype=np.int32)
cv2.fillPoly(roi, [roi_points], 1)
Lhs = np.zeros((2,2), dtype= np.float32)
Rhs = np.zeros((2,1), dtype= np.float32)
grey = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
mn_hsl = np.median(grey) #grey.median()
edges = cv2.Canny(grey, int(mn_hsl*4), int(mn_hsl*3))
# edges = cv2.Canny(grey[:, :, 1], 500, 400)
edges2 = edges*roi
cv2.imwrite(self.temp_dir+"mask.jpg", edges2)
lines = cv2.HoughLinesP(edges*roi,rho = 4,theta = np.pi/180,threshold = 4,minLineLength = 80,maxLineGap = 40)
# print(lines)
for line in lines:
for x1, y1, x2, y2 in line:
normal = np.array([[-(y2-y1)], [x2-x1]], dtype=np.float32)
normal /=np.linalg.norm(normal)
point = np.array([[x1],[y1]], dtype=np.float32)
outer = np.matmul(normal, normal.T)
Lhs += outer
Rhs += np.matmul(outer, point)
vanishing_point = np.matmul(np.linalg.inv(Lhs),Rhs)
top = vanishing_point[1] + 50
bottom = self.size[1]-100
def on_line(p1, p2, ycoord):
return [p1[0]+ (p2[0]-p1[0])/float(p2[1]-p1[1])*(ycoord-p1[1]), ycoord]
#define source and destination targets
p1 = [vanishing_point[0] - self.WRAPPED_WIDTH/2, top]
p2 = [vanishing_point[0] + self.WRAPPED_WIDTH/2, top]
p3 = on_line(p2, vanishing_point, bottom)
p4 = on_line(p1, vanishing_point, bottom)
src_points = np.array([p1,p2,p3,p4], dtype=np.float32)
# print(src_points,vanishing_point)
dst_points = np.array([[0, 0], [self.UNWARPED_SIZE[0], 0],
[self.UNWARPED_SIZE[0], self.UNWARPED_SIZE[1]],
[0, self.UNWARPED_SIZE[1]]], dtype=np.float32)
self.trans_mat = cv2.getPerspectiveTransform(src_points, dst_points)
self.inv_trans_mat = cv2.getPerspectiveTransform(dst_points,src_points)
min_wid = 1000
img = cv2.warpPerspective(self.image, self.trans_mat, self.UNWARPED_SIZE)
grey = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
mask = grey[:,:,1]>128
mask[:, :50]=0
mask[:, -50:]=0
cv2.imshow("grey", grey)
cv2.waitKey(0)
cv2.destroyAllWindows()
mom = cv2.moments(mask[:,:self.UNWARPED_SIZE[0]//2].astype(np.uint8))
x1 = mom["m10"]/mom["m00"]
mom = cv2.moments(mask[:,self.UNWARPED_SIZE[0]//2:].astype(np.uint8))
x2 = self.UNWARPED_SIZE[0]//2 + mom["m10"]/mom["m00"]
if (x2-x1<min_wid):
min_wid = x2-x1
self.pixels_per_meter[0] = min_wid/self.LANE_WIDTH
if False :#self.camera.callibration_done :
Lh = np.linalg.inv(np.matmul(self.trans_mat, self.camera.cam_matrix))
else:
Lh = np.linalg.inv(self.trans_mat)
self.pixels_per_meter[1] = self.pixels_per_meter[0] * np.linalg.norm(Lh[:,0]) / np.linalg.norm(Lh[:,1])
self.perspective_done_at = datetime.utcnow().timestamp()
if verbose :
img_orig = cv2.polylines(self.image, [src_points.astype(np.int32)],True, (0,0,255), thickness=5)
cv2.line(img, (int(x1), 0), (int(x1), self.UNWARPED_SIZE[1]), (255, 0, 0), 3)
cv2.line(img, (int(x2), 0), (int(x2), self.UNWARPED_SIZE[1]), (0, 0, 255), 3)
cv2.circle(img_orig,tuple(vanishing_point),10, color=(0,0,255), thickness=5)
cv2.imwrite(self.temp_dir+"perspective1.jpg",img_orig)
cv2.imwrite(self.temp_dir+"perspective2.jpg",img)
# cv2.imshow(cv2.hconcat((img_orig, cv2.resize(img, img_orig.shape))))
return
def get_speed(self):
return 30
def determine_lane(self, box:OBSTACLE):
points =np.array( [box.xmid, box.ymid], dtype='float32').reshape(1,1,2)
new_points = cv2.perspectiveTransform(points,self.inv_trans_mat)
new_points = new_points.reshape(2)
left = np.polyval(self.left_line.poly_coeffs,new_points[0])
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as signal
from scipy.io import wavfile
from scipy.fftpack import fft,fftfreq
from numpy.lib.function_base import angle
from calibratesdr.dvbt import dvbt_const as dc
import os
from rtlsdr import RtlSdr
# def file_input():
# '''
# Input the data file.
# '''
# os.chdir('calibratesdr/dvbt/test')
# filename = input("Input Recorded File Name:")
# # filename = 'DVB-T 5MHz 5333333sps_IQ.wav'
# frequency_center = int(input("Central Frequency:"))
# # frequency_center = 5333333
# samplerate, data = wavfile.read(filename)
# return data,samplerate,frequency_center
def wavtoiq(data):
'''
Convert the Interleaved IQ wav file to complex format
'''
data = np.ravel(data)
iq_data = data[0::2] + 1j*data[1::2]
return iq_data
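# Illustrative example (values made up): wavtoiq() pairs interleaved samples
# into complex I/Q values.
# wavtoiq(np.array([1, 0, 0, 1, -1, 0], dtype=np.float32))
#   -> array([ 1.+0.j,  0.+1.j, -1.+0.j])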
def run_dsp(iq_data):
constellations, num_symbols = get_constellations(iq_data)
return constellations, num_symbols
def get_constellations(iq_data):
'''
Find the start of a symbol and extract constellation data
'''
iq_dc = iq_data - np.mean(iq_data)
"""
:Authors: - <NAME>
"""
import numpy as np
from .extractor import TableLookup
class RuleTable(TableLookup):
CDEC_DEFAULT = 'Glue PassThrough EgivenFCoherent SampleCountF CountEF MaxLexFgivenE MaxLexEgivenF IsSingletonF IsSingletonFE'.split()
def __init__(self, uid, name, fnames=CDEC_DEFAULT):
super(RuleTable, self).__init__(uid, name)
self._fnames = tuple(fnames)
def __repr__(self):
return '{0}(uid={1}, name={2}, fnames={3})'.format(RuleTable.__name__,
repr(self.id),
repr(self.name),
repr(self._fnames))
def weights(self, wmap): # using a dense representation
wvec = []
for f in self._fnames:
try:
wvec.append(wmap[f])
except KeyError:
raise KeyError('Missing RuleTable feature: %s' % f)
return np.array(wvec, float)
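# Illustrative usage (weight values made up): weights() reads a dense vector in
# CDEC_DEFAULT feature order from a feature-name -> weight map.
# wmap = {name: 0.5 for name in RuleTable.CDEC_DEFAULT}
# RuleTable(uid=0, name='rules').weights(wmap)  -> array of nine 0.5 entries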
from __future__ import print_function, division, absolute_import
import math
import copy
import numbers
import sys
import os
import json
import types
import warnings
import numpy as np
import cv2
import imageio
import scipy.spatial.distance
import six
import six.moves as sm
import skimage.draw
import skimage.measure
import collections
from PIL import Image as PIL_Image, ImageDraw as PIL_ImageDraw, ImageFont as PIL_ImageFont
ALL = "ALL"
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# filepath to the quokka image, its annotations and depth map
QUOKKA_FP = os.path.join(FILE_DIR, "quokka.jpg")
QUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, "quokka_annotations.json")
QUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(FILE_DIR, "quokka_depth_map_halfres.png")
DEFAULT_FONT_FP = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"DejaVuSans.ttf"
)
# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
SEED_MIN_VALUE = 0
SEED_MAX_VALUE = 2**31-1 # use 2**31 instead of 2**32 here because 2**31 errored on some systems
# to check if a dtype instance is among these dtypes, use e.g. `dtype.type in NP_FLOAT_TYPES`
# do not just use `dtype in NP_FLOAT_TYPES` as that would fail
NP_FLOAT_TYPES = set(np.sctypes["float"])
NP_INT_TYPES = set(np.sctypes["int"])
NP_UINT_TYPES = set(np.sctypes["uint"])
IMSHOW_BACKEND_DEFAULT = "matplotlib"
IMRESIZE_VALID_INTERPOLATIONS = ["nearest", "linear", "area", "cubic",
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
def is_np_array(val):
"""
Checks whether a variable is a numpy array.
Parameters
----------
val
The variable to check.
Returns
-------
out : bool
True if the variable is a numpy array. Otherwise False.
"""
# using np.generic here via isinstance(val, (np.ndarray, np.generic)) seems to also fire for scalar numpy values
# even though those are not arrays
return isinstance(val, np.ndarray)
def is_single_integer(val):
"""
Checks whether a variable is an integer.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is an integer. Otherwise False.
"""
return isinstance(val, numbers.Integral) and not isinstance(val, bool)
def is_single_float(val):
"""
Checks whether a variable is a float.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a float. Otherwise False.
"""
return isinstance(val, numbers.Real) and not is_single_integer(val) and not isinstance(val, bool)
def is_single_number(val):
"""
Checks whether a variable is a number, i.e. an integer or float.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a number. Otherwise False.
"""
return is_single_integer(val) or is_single_float(val)
def is_iterable(val):
"""
Checks whether a variable is iterable.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is an iterable. Otherwise False.
"""
return isinstance(val, collections.Iterable)
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
"""
Checks whether a variable is a string.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a string. Otherwise False.
"""
return isinstance(val, six.string_types)
def is_single_bool(val):
"""
Checks whether a variable is a boolean.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a boolean. Otherwise False.
"""
return type(val) == type(True)
def is_integer_array(val):
"""
Checks whether a variable is a numpy integer array.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a numpy integer array. Otherwise False.
"""
return is_np_array(val) and issubclass(val.dtype.type, np.integer)
def is_float_array(val):
"""
Checks whether a variable is a numpy float array.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a numpy float array. Otherwise False.
"""
return is_np_array(val) and issubclass(val.dtype.type, np.floating)
def is_callable(val):
"""
Checks whether a variable is a callable, e.g. a function.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a callable. Otherwise False.
"""
# python 3.x with x <= 2 does not support callable(), apparently
if sys.version_info[0] == 3 and sys.version_info[1] <= 2:
return hasattr(val, '__call__')
else:
return callable(val)
def is_generator(val):
"""
Checks whether a variable is a generator.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True is the variable is a generator. Otherwise False.
"""
return isinstance(val, types.GeneratorType)
def caller_name():
"""
Returns the name of the caller, e.g. a function.
Returns
-------
str
The name of the caller as a string
"""
return sys._getframe(1).f_code.co_name
def seed(seedval):
"""
Set the seed used by the global random state and thereby all randomness
in the library.
This random state is used by default by all augmenters. Under special
circumstances (e.g. when an augmenter is switched to deterministic mode),
the global random state is replaced by another -- local -- one.
The replacement is dependent on the global random state.
Parameters
----------
seedval : int
The seed to use.
"""
CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
"""
Returns the current/global random state of the library.
Returns
-------
numpy.random.RandomState
The current/global random state.
"""
return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
"""
Returns a new random state.
Parameters
----------
seed : None or int, optional
Optional seed value to use.
The same datatypes are allowed as for ``numpy.random.RandomState(seed)``.
fully_random : bool, optional
Whether to use numpy's random initialization for the
RandomState (used if set to True). If False, a seed is sampled from
the global random state, which is a bit faster and hence the default.
Returns
-------
numpy.random.RandomState
The new random state.
"""
if seed is None:
if not fully_random:
# sample manually a seed instead of just RandomState(),
# because the latter one
# is way slower.
seed = CURRENT_RANDOM_STATE.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
return np.random.RandomState(seed)
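# Example (illustrative, not from the library docs): a local state derived from a
# fixed seed is reproducible, e.g. new_random_state(seed=123).randint(0, 10, size=3)
# returns the same three integers on every call.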
def dummy_random_state():
"""
Returns a dummy random state that is always based on a seed of 1.
Returns
-------
numpy.random.RandomState
The new random state.
"""
return np.random.RandomState(1)
def copy_random_state(random_state, force_copy=False):
"""
Creates a copy of a random state.
Parameters
----------
random_state : numpy.random.RandomState
The random state to copy.
force_copy : bool, optional
If True, this function will always create a copy of every random
state. If False, it will not copy numpy's default random state,
but all other random states.
Returns
-------
rs_copy : numpy.random.RandomState
The copied random state.
"""
if random_state == np.random and not force_copy:
return random_state
else:
rs_copy = dummy_random_state()
orig_state = random_state.get_state()
rs_copy.set_state(orig_state)
return rs_copy
def derive_random_state(random_state):
"""
Create a new random states based on an existing random state or seed.
Parameters
----------
random_state : numpy.random.RandomState
Random state or seed from which to derive the new random state.
Returns
-------
numpy.random.RandomState
Derived random state.
"""
return derive_random_states(random_state, n=1)[0]
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
"""
Create N new random states based on an existing random state or seed.
Parameters
----------
random_state : numpy.random.RandomState
Random state or seed from which to derive new random states.
n : int, optional
Number of random states to derive.
Returns
-------
list of numpy.random.RandomState
Derived random states.
"""
seed_ = random_state.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
return [new_random_state(seed_+i) for i in sm.xrange(n)]
def forward_random_state(random_state):
"""
Forward the internal state of a random state.
This makes sure that future calls to the random_state will produce new random values.
Parameters
----------
random_state : numpy.random.RandomState
Random state to forward.
"""
random_state.uniform()
def _quokka_normalize_extract(extract):
"""
Generate a normalized rectangle to be extracted from the standard quokka image.
Parameters
----------
extract : 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Unnormalized representation of the image subarea to be extracted.
* If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)``
will be extracted from the image.
* If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
and ``y2``.
* If a BoundingBox, then that bounding box's area will be extracted from the image.
* If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the
one bounding box will be used similar to BoundingBox.
Returns
-------
bb : imgaug.BoundingBox
Normalized representation of the area to extract from the standard quokka image.
"""
if extract == "square":
bb = BoundingBox(x1=0, y1=0, x2=643, y2=643)
elif isinstance(extract, tuple) and len(extract) == 4:
bb = BoundingBox(x1=extract[0], y1=extract[1], x2=extract[2], y2=extract[3])
elif isinstance(extract, BoundingBox):
bb = extract
elif isinstance(extract, BoundingBoxesOnImage):
do_assert(len(extract.bounding_boxes) == 1)
do_assert(extract.shape[0:2] == (643, 960))
bb = extract.bounding_boxes[0]
else:
raise Exception(
"Expected 'square' or tuple of four entries or BoundingBox or BoundingBoxesOnImage "
+ "for parameter 'extract', got %s." % (type(extract),)
)
return bb
def _compute_resized_shape(from_shape, to_shape):
"""
Computes the intended new shape of an image-like array after resizing.
Parameters
----------
from_shape : tuple or ndarray
Old shape of the array. Usually expected to be a tuple of form ``(H, W)`` or ``(H, W, C)`` or
alternatively an array with two or three dimensions.
to_shape : None or tuple of ints or tuple of floats or int or float or ndarray
New shape of the array.
* If None, then `from_shape` will be used as the new shape.
* If an int ``V``, then the new shape will be ``(V, V, [C])``, where ``C`` will be added if it
is part of `from_shape`.
* If a float ``V``, then the new shape will be ``(H*V, W*V, [C])``, where ``H`` and ``W`` are the old
height/width.
* If a tuple ``(H', W', [C'])`` of ints, then ``H'`` and ``W'`` will be used as the new height
and width.
* If a tuple ``(H', W', [C'])`` of floats (except ``C``), then ``H'`` and ``W'`` will
be used as the new height and width.
* If a numpy array, then the array's shape will be used.
Returns
-------
to_shape_computed : tuple of int
New shape.
"""
if is_np_array(from_shape):
from_shape = from_shape.shape
if is_np_array(to_shape):
to_shape = to_shape.shape
to_shape_computed = list(from_shape)
if to_shape is None:
pass
elif isinstance(to_shape, tuple):
do_assert(len(from_shape) in [2, 3])
do_assert(len(to_shape) in [2, 3])
if len(from_shape) == 3 and len(to_shape) == 3:
do_assert(from_shape[2] == to_shape[2])
elif len(to_shape) == 3:
to_shape_computed.append(to_shape[2])
do_assert(all([v is None or is_single_number(v) for v in to_shape[0:2]]),
"Expected the first two entries in to_shape to be None or numbers, "
+ "got types %s." % (str([type(v) for v in to_shape[0:2]]),))
for i, from_shape_i in enumerate(from_shape[0:2]):
if to_shape[i] is None:
to_shape_computed[i] = from_shape_i
elif is_single_integer(to_shape[i]):
to_shape_computed[i] = to_shape[i]
else: # float
to_shape_computed[i] = int(np.round(from_shape_i * to_shape[i]))
elif is_single_integer(to_shape) or is_single_float(to_shape):
to_shape_computed = _compute_resized_shape(from_shape, (to_shape, to_shape))
else:
raise Exception("Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int "
+ "or single float, got %s." % (type(to_shape),))
return tuple(to_shape_computed)
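# Illustrative examples (not from the library docs) of the accepted `to_shape` forms:
# _compute_resized_shape((4, 8, 3), 2.0)       -> (8, 16, 3)   (float = scale factor)
# _compute_resized_shape((4, 8, 3), (2, None)) -> (2, 8, 3)    (None keeps that axis)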
def quokka(size=None, extract=None):
"""
Returns an image of a quokka as a numpy array.
Parameters
----------
size : None or float or tuple of int, optional
Size of the output image. Input into :func:`imgaug.imgaug.imresize_single_image`.
Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height
and ``W`` is the width. If None, then the image will not be resized.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Subarea of the quokka image to extract:
* If None, then the whole image will be used.
* If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)`` will
be extracted from the image.
* If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
and ``y2``.
* If a BoundingBox, then that bounding box's area will be extracted from the image.
* If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
and a shape matching the full image dimensions (i.e. ``(643, 960, *)``). Then the
one bounding box will be used similar to BoundingBox.
Returns
-------
img : (H,W,3) ndarray
The image array of dtype uint8.
"""
img = imageio.imread(QUOKKA_FP, pilmode="RGB")
if extract is not None:
bb = _quokka_normalize_extract(extract)
img = bb.extract_from_image(img)
if size is not None:
shape_resized = _compute_resized_shape(img.shape, size)
img = imresize_single_image(img, shape_resized[0:2])
return img
def quokka_square(size=None):
"""
Returns a square image of a quokka as a numpy array.
Parameters
----------
size : None or float or tuple of int, optional
Size of the output image. Input into :func:`imgaug.imgaug.imresize_single_image`.
Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height
and ``W`` is the width. If None, then the image will not be resized.
Returns
-------
img : (H,W,3) ndarray
The image array of dtype uint8.
"""
return quokka(size=size, extract="square")
def quokka_heatmap(size=None, extract=None):
"""
Returns a heatmap (here: depth map) for the standard example quokka image.
Parameters
----------
size : None or float or tuple of int, optional
See :func:`imgaug.quokka`.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
See :func:`imgaug.quokka`.
Returns
-------
result : imgaug.HeatmapsOnImage
Depth map as a heatmap object. Values close to 0.0 denote objects that are close to
the camera. Values close to 1.0 denote objects that are furthest away (among all shown
objects).
"""
img = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
img = imresize_single_image(img, (643, 960), interpolation="cubic")
if extract is not None:
bb = _quokka_normalize_extract(extract)
img = bb.extract_from_image(img)
if size is None:
size = img.shape[0:2]
shape_resized = _compute_resized_shape(img.shape, size)
img = imresize_single_image(img, shape_resized[0:2])
img_0to1 = img[..., 0] # depth map was saved as 3-channel RGB
img_0to1 = img_0to1.astype(np.float32) / 255.0
img_0to1 = 1 - img_0to1 # depth map was saved as 0 being furthest away
return HeatmapsOnImage(img_0to1, shape=img_0to1.shape[0:2] + (3,))
def quokka_segmentation_map(size=None, extract=None):
"""
Returns a segmentation map for the standard example quokka image.
Parameters
----------
size : None or float or tuple of int, optional
See :func:`imgaug.quokka`.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
See :func:`imgaug.quokka`.
Returns
-------
result : imgaug.SegmentationMapOnImage
Segmentation map object.
"""
with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
xx = []
yy = []
for kp_dict in json_dict["polygons"][0]["keypoints"]:
x = kp_dict["x"]
y = kp_dict["y"]
xx.append(x)
yy.append(y)
img_seg = np.zeros((643, 960, 1), dtype=np.float32)
rr, cc = skimage.draw.polygon(np.array(yy), np.array(xx), shape=img_seg.shape)
img_seg[rr, cc] = 1.0
if extract is not None:
bb = _quokka_normalize_extract(extract)
img_seg = bb.extract_from_image(img_seg)
segmap = SegmentationMapOnImage(img_seg, shape=img_seg.shape[0:2] + (3,))
if size is not None:
shape_resized = _compute_resized_shape(img_seg.shape, size)
segmap = segmap.resize(shape_resized[0:2])
segmap.shape = tuple(shape_resized[0:2]) + (3,)
return segmap
def quokka_keypoints(size=None, extract=None):
"""
Returns example keypoints on the standard example quokka image.
The keypoints cover the eyes, ears, nose and paws.
Parameters
----------
size : None or float or tuple of int or tuple of float, optional
Size of the output image on which the keypoints are placed. If None, then the keypoints
are not projected to any new size (positions on the original image are used).
Floats lead to relative size changes, ints to absolute sizes in pixels.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Subarea to extract from the image. See :func:`imgaug.quokka`.
Returns
-------
kpsoi : imgaug.KeypointsOnImage
Example keypoints on the quokka image.
"""
left, top = 0, 0
if extract is not None:
bb_extract = _quokka_normalize_extract(extract)
left = bb_extract.x1
top = bb_extract.y1
with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
keypoints = []
for kp_dict in json_dict["keypoints"]:
keypoints.append(Keypoint(x=kp_dict["x"] - left, y=kp_dict["y"] - top))
if extract is not None:
shape = (bb_extract.height, bb_extract.width, 3)
else:
shape = (643, 960, 3)
kpsoi = KeypointsOnImage(keypoints, shape=shape)
if size is not None:
shape_resized = _compute_resized_shape(shape, size)
kpsoi = kpsoi.on(shape_resized)
return kpsoi
def quokka_bounding_boxes(size=None, extract=None):
"""
Returns example bounding boxes on the standard example quokka image.
Currently only a single bounding box is returned that covers the quokka.
Parameters
----------
size : None or float or tuple of int or tuple of float, optional
Size of the output image on which the BBs are placed. If None, then the BBs
are not projected to any new size (positions on the original image are used).
Floats lead to relative size changes, ints to absolute sizes in pixels.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
Subarea to extract from the image. See :func:`imgaug.quokka`.
Returns
-------
bbsoi : imgaug.BoundingBoxesOnImage
Example BBs on the quokka image.
"""
left, top = 0, 0
if extract is not None:
bb_extract = _quokka_normalize_extract(extract)
left = bb_extract.x1
top = bb_extract.y1
with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
bbs = []
for bb_dict in json_dict["bounding_boxes"]:
bbs.append(
BoundingBox(
x1=bb_dict["x1"] - left,
y1=bb_dict["y1"] - top,
x2=bb_dict["x2"] - left,
y2=bb_dict["y2"] - top
)
)
if extract is not None:
shape = (bb_extract.height, bb_extract.width, 3)
else:
shape = (643, 960, 3)
bbsoi = BoundingBoxesOnImage(bbs, shape=shape)
if size is not None:
shape_resized = _compute_resized_shape(shape, size)
bbsoi = bbsoi.on(shape_resized)
return bbsoi
def angle_between_vectors(v1, v2):
"""
Returns the angle in radians between vectors `v1` and `v2`.
From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
Parameters
----------
v1 : (N,) ndarray
First vector.
v2 : (N,) ndarray
Second vector.
Returns
-------
out : float
Angle in radians.
Examples
--------
>>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))
1.570796...
>>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))
0.0
>>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))
3.141592...
"""
v1_u = v1 / np.linalg.norm(v1)
v2_u = v2 / np.linalg.norm(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
# TODO is this used anywhere?
def compute_line_intersection_point(x1, y1, x2, y2, x3, y3, x4, y4):
"""
Compute the intersection point of two lines.
Taken from https://stackoverflow.com/a/20679579 .
Parameters
----------
x1 : number
x coordinate of the first point on line 1. (The line extends beyond this point.)
y1 : number
y coordinate of the first point on line 1. (The line extends beyond this point.)
x2 : number
x coordinate of the second point on line 1. (The line extends beyond this point.)
y2 : number
y coordinate of the second point on line 1. (The line extends beyond this point.)
x3 : number
x coordinate of the first point on line 2. (The line extends beyond this point.)
y3 : number
y coordinate of the first point on line 2. (The line extends beyond this point.)
x4 : number
x coordinate of the second point on line 2. (The line extends beyond this point.)
y4 : number
y coordinate of the second point on line 2. (The line extends beyond this point.)
Returns
-------
tuple of number or bool
The coordinate of the intersection point as a tuple ``(x, y)``.
If the lines are parallel (no intersection point or an infinite number of them), the result is False.
"""
def _make_line(p1, p2):
A = (p1[1] - p2[1])
B = (p2[0] - p1[0])
C = (p1[0]*p2[1] - p2[0]*p1[1])
return A, B, -C
L1 = _make_line((x1, y1), (x2, y2))
L2 = _make_line((x3, y3), (x4, y4))
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
if D != 0:
x = Dx / D
y = Dy / D
return x, y
else:
return False
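# Example (illustrative, not from the library docs): the diagonals of the unit
# square cross at (0.5, 0.5), while parallel lines return False.
# compute_line_intersection_point(0, 0, 1, 1, 0, 1, 1, 0) -> (0.5, 0.5)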
# TODO replace by cv2.putText()?
def draw_text(img, y, x, text, color=(0, 255, 0), size=25):
"""
Draw text on an image.
This uses by default DejaVuSans as its font, which is included in this library.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: yes; not tested
* ``float64``: no
* ``float128``: no
* ``bool``: no
TODO check if other dtypes could be enabled
Parameters
----------
img : (H,W,3) ndarray
The image array to draw text on.
Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
y : int
y-coordinate of the top left corner of the text.
x : int
x-coordinate of the top left corner of the text.
text : str
The text to draw.
color : iterable of int, optional
Color of the text to draw. For RGB-images this is expected to be an RGB color.
size : int, optional
Font size of the text to draw.
Returns
-------
img_np : (H,W,3) ndarray
Input image with text drawn on it.
"""
do_assert(img.dtype in [np.uint8, np.float32])
input_dtype = img.dtype
if img.dtype == np.float32:
img = img.astype(np.uint8)
img = PIL_Image.fromarray(img)
font = PIL_ImageFont.truetype(DEFAULT_FONT_FP, size)
context = PIL_ImageDraw.Draw(img)
context.text((x, y), text, fill=tuple(color), font=font)
img_np = np.asarray(img)
img_np.setflags(write=True) # PIL/asarray returns read only array
if img_np.dtype != input_dtype:
img_np = img_np.astype(input_dtype)
return img_np
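# Example (illustrative, not from the library docs): draw the default green text
# near the top-left corner of a blank uint8 image.
# img_out = draw_text(np.zeros((64, 128, 3), dtype=np.uint8), y=4, x=4, text="hello")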
# TODO rename sizes to size?
def imresize_many_images(images, sizes=None, interpolation=None):
"""
Resize many images to a specified size.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested (3)
* ``int16``: yes; tested
* ``int32``: limited; tested (4)
* ``int64``: no (2)
* ``float16``: yes; tested (5)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (1)
* ``bool``: yes; tested (6)
- (1) rejected by ``cv2.imresize``
- (2) results too inaccurate
- (3) mapped internally to ``int16`` when interpolation!="nearest"
- (4) only supported for interpolation="nearest", other interpolations lead to cv2 error
- (5) mapped internally to ``float32``
- (6) mapped internally to ``uint8``
Parameters
----------
images : (N,H,W,[C]) ndarray or list of (H,W,[C]) ndarray
Array of the images to resize.
Usually recommended to be of dtype uint8.
sizes : float or iterable of int or iterable of float
The new size of the images, given either as a fraction (a single float) or as
a ``(height, width)`` tuple of two integers or as a ``(height fraction, width fraction)``
tuple of two floats.
interpolation : None or str or int, optional
The interpolation to use during resize.
If int, then expected to be one of:
* ``cv2.INTER_NEAREST`` (nearest neighbour interpolation)
* ``cv2.INTER_LINEAR`` (linear interpolation)
* ``cv2.INTER_AREA`` (area interpolation)
* ``cv2.INTER_CUBIC`` (cubic interpolation)
If string, then expected to be one of:
* ``nearest`` (identical to ``cv2.INTER_NEAREST``)
* ``linear`` (identical to ``cv2.INTER_LINEAR``)
* ``area`` (identical to ``cv2.INTER_AREA``)
* ``cubic`` (identical to ``cv2.INTER_CUBIC``)
If None, the interpolation will be chosen automatically. For size
increases, area interpolation will be picked and for size decreases,
linear interpolation will be picked.
Returns
-------
result : (N,H',W',[C]) ndarray
Array of the resized images.
Examples
--------
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)
Converts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))
Converts 2 RGB images of height and width 16 to images of height 16 and width 32.
>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))
Converts 2 RGB images of height and width 16 to images of height 32 and width 64.
"""
# we just do nothing if the input contains zero images
# one could also argue that an exception would be appropriate here
if len(images) == 0:
return images
# verify that all input images have height/width > 0
do_assert(
all([image.shape[0] > 0 and image.shape[1] > 0 for image in images]),
("Cannot resize images, because at least one image has a height and/or width of zero. "
+ "Observed shapes were: %s.") % (str([image.shape for image in images]),)
)
# verify that sizes contains only values >0
if is_single_number(sizes) and sizes <= 0:
raise Exception(
"Cannot resize to the target size %.8f, because the value is zero or lower than zero." % (sizes,))
elif isinstance(sizes, tuple) and (sizes[0] <= 0 or sizes[1] <= 0):
sizes_str = [
"int %d" % (sizes[0],) if is_single_integer(sizes[0]) else "float %.8f" % (sizes[0],),
"int %d" % (sizes[1],) if is_single_integer(sizes[1]) else "float %.8f" % (sizes[1],),
]
sizes_str = "(%s, %s)" % (sizes_str[0], sizes_str[1])
raise Exception(
"Cannot resize to the target sizes %s. At least one value is zero or lower than zero." % (sizes_str,))
# change after the validation to make the above error messages match the original input
if is_single_number(sizes):
sizes = (sizes, sizes)
else:
do_assert(len(sizes) == 2, "Expected tuple with exactly two entries, got %d entries." % (len(sizes),))
do_assert(all([is_single_number(val) for val in sizes]),
"Expected tuple with two ints or floats, got types %s." % (str([type(val) for val in sizes]),))
# if input is a list, call this function N times for N images
# but check beforehand if all images have the same shape, then just convert to a single array and de-convert
# afterwards
if isinstance(images, list):
nb_shapes = len(set([image.shape for image in images]))
if nb_shapes == 1:
return list(imresize_many_images(np.array(images), sizes=sizes, interpolation=interpolation))
else:
return [imresize_many_images(image[np.newaxis, ...], sizes=sizes, interpolation=interpolation)[0, ...]
for image in images]
shape = images.shape
do_assert(images.ndim in [3, 4], "Expected array of shape (N, H, W, [C]), got shape %s" % (str(shape),))
nb_images = shape[0]
im_height, im_width = shape[1], shape[2]
nb_channels = shape[3] if images.ndim > 3 else None
height, width = sizes[0], sizes[1]
height = int(np.round(im_height * height)) if is_single_float(height) else height
width = int(np.round(im_width * width)) if is_single_float(width) else width
if height == im_height and width == im_width:
return np.copy(images)
ip = interpolation
do_assert(ip is None or ip in IMRESIZE_VALID_INTERPOLATIONS)
if ip is None:
if height > im_height or width > im_width:
ip = cv2.INTER_AREA
else:
ip = cv2.INTER_LINEAR
elif ip in ["nearest", cv2.INTER_NEAREST]:
ip = cv2.INTER_NEAREST
elif ip in ["linear", cv2.INTER_LINEAR]:
ip = cv2.INTER_LINEAR
elif ip in ["area", cv2.INTER_AREA]:
ip = cv2.INTER_AREA
else: # if ip in ["cubic", cv2.INTER_CUBIC]:
ip = cv2.INTER_CUBIC
# TODO find more beautiful way to avoid circular imports
from . import dtypes as iadt
if ip == cv2.INTER_NEAREST:
iadt.gate_dtypes(images,
allowed=["bool", "uint8", "uint16", "int8", "int16", "int32", "float16", "float32", "float64"],
disallowed=["uint32", "uint64", "uint128", "uint256", "int64", "int128", "int256",
"float96", "float128", "float256"],
augmenter=None)
else:
iadt.gate_dtypes(images,
allowed=["bool", "uint8", "uint16", "int8", "int16", "float16", "float32", "float64"],
disallowed=["uint32", "uint64", "uint128", "uint256", "int32", "int64", "int128", "int256",
"float96", "float128", "float256"],
augmenter=None)
result_shape = (nb_images, height, width)
if nb_channels is not None:
result_shape = result_shape + (nb_channels,)
result = np.zeros(result_shape, dtype=images.dtype)
for i, image in enumerate(images):
input_dtype = image.dtype
if image.dtype.type == np.bool_:
image = image.astype(np.uint8) * 255
elif image.dtype.type == np.int8 and ip != cv2.INTER_NEAREST:
image = image.astype(np.int16)
elif image.dtype.type == np.float16:
image = image.astype(np.float32)
result_img = cv2.resize(image, (width, height), interpolation=ip)
assert result_img.dtype == image.dtype
# cv2 removes the channel axis if input was (H, W, 1)
# we re-add it (but only if input was not (H, W))
if len(result_img.shape) == 2 and nb_channels is not None and nb_channels == 1:
result_img = result_img[:, :, np.newaxis]
if input_dtype.type == np.bool_:
result_img = result_img > 127
elif input_dtype.type == np.int8 and ip != cv2.INTER_NEAREST:
# TODO somehow better avoid circular imports here
from . import dtypes as iadt
result_img = iadt.restore_dtypes_(result_img, np.int8)
elif input_dtype.type == np.float16:
# TODO see above
from . import dtypes as iadt
result_img = iadt.restore_dtypes_(result_img, np.float16)
result[i] = result_img
return result
def imresize_single_image(image, sizes, interpolation=None):
"""
Resizes a single image.
dtype support::
See :func:`imgaug.imgaug.imresize_many_images`.
Parameters
----------
image : (H,W,C) ndarray or (H,W) ndarray
Array of the image to resize.
Usually recommended to be of dtype uint8.
sizes : float or iterable of int or iterable of float
See :func:`imgaug.imgaug.imresize_many_images`.
interpolation : None or str or int, optional
See :func:`imgaug.imgaug.imresize_many_images`.
Returns
-------
out : (H',W',C) ndarray or (H',W') ndarray
The resized image.
"""
grayscale = False
if image.ndim == 2:
grayscale = True
image = image[:, :, np.newaxis]
do_assert(len(image.shape) == 3, image.shape)
rs = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
if grayscale:
return np.squeeze(rs[0, :, :, 0])
else:
return rs[0, ...]
# TODO add crop() function too
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
"""
Pad an image-like array on its top/right/bottom/left side.
This function is a wrapper around :func:`numpy.pad`.
dtype support::
* ``uint8``: yes; fully tested (1)
* ``uint16``: yes; fully tested (1)
* ``uint32``: yes; fully tested (2) (3)
* ``uint64``: yes; fully tested (2) (3)
* ``int8``: yes; fully tested (1)
* ``int16``: yes; fully tested (1)
* ``int32``: yes; fully tested (1)
* ``int64``: yes; fully tested (2) (3)
* ``float16``: yes; fully tested (2) (3)
* ``float32``: yes; fully tested (1)
* ``float64``: yes; fully tested (1)
* ``float128``: yes; fully tested (2) (3)
* ``bool``: yes; tested (2) (3)
- (1) Uses ``cv2`` if `mode` is one of: ``"constant"``, ``"edge"``, ``"reflect"``, ``"symmetric"``.
Otherwise uses ``numpy``.
- (2) Uses ``numpy``.
- (3) Rejected by ``cv2``.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pad.
top : int, optional
Amount of pixels to add at the top side of the image. Must be 0 or greater.
right : int, optional
Amount of pixels to add at the right side of the image. Must be 0 or greater.
bottom : int, optional
Amount of pixels to add at the bottom side of the image. Must be 0 or greater.
left : int, optional
Amount of pixels to add at the left side of the image. Must be 0 or greater.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
In case of mode ``constant``, the parameter `cval` will be used as the ``constant_values``
parameter to :func:`numpy.pad`.
In case of mode ``linear_ramp``, the parameter `cval` will be used as the ``end_values``
parameter to :func:`numpy.pad`.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
The cval is expected to match the input array's dtype and value range.
Returns
-------
arr_pad : (H',W') ndarray or (H',W',C) ndarray
Padded array with height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
"""
do_assert(arr.ndim in [2, 3])
do_assert(top >= 0)
do_assert(right >= 0)
do_assert(bottom >= 0)
do_assert(left >= 0)
if top > 0 or right > 0 or bottom > 0 or left > 0:
mapping_mode_np_to_cv2 = {
"constant": cv2.BORDER_CONSTANT,
"edge": cv2.BORDER_REPLICATE,
"linear_ramp": None,
"maximum": None,
"mean": None,
"median": None,
"minimum": None,
"reflect": cv2.BORDER_REFLECT_101,
"symmetric": cv2.BORDER_REFLECT,
"wrap": None,
cv2.BORDER_CONSTANT: cv2.BORDER_CONSTANT,
cv2.BORDER_REPLICATE: cv2.BORDER_REPLICATE,
cv2.BORDER_REFLECT_101: cv2.BORDER_REFLECT_101,
cv2.BORDER_REFLECT: cv2.BORDER_REFLECT
}
bad_mode_cv2 = mapping_mode_np_to_cv2.get(mode, None) is None
# these datatypes all simply generate a "TypeError: src data type = X is not supported" error
bad_datatype_cv2 = arr.dtype.name in ["uint32", "uint64", "int64", "float16", "float128", "bool"]
if not bad_datatype_cv2 and not bad_mode_cv2:
cval = float(cval) if arr.dtype.kind == "f" else int(cval) # results in TypeError otherwise for np inputs
if arr.ndim == 2 or arr.shape[2] <= 4:
# without this, only the first channel is padded with the cval, all following channels with 0
if arr.ndim == 3:
cval = tuple([cval] * arr.shape[2])
arr_pad = cv2.copyMakeBorder(arr, top=top, bottom=bottom, left=left, right=right,
borderType=mapping_mode_np_to_cv2[mode], value=cval)
if arr.ndim == 3 and arr_pad.ndim == 2:
arr_pad = arr_pad[..., np.newaxis]
else:
result = []
channel_start_idx = 0
while channel_start_idx < arr.shape[2]:
arr_c = arr[..., channel_start_idx:channel_start_idx+4]
cval_c = tuple([cval] * arr_c.shape[2])
arr_pad_c = cv2.copyMakeBorder(arr_c, top=top, bottom=bottom, left=left, right=right,
borderType=mapping_mode_np_to_cv2[mode], value=cval_c)
arr_pad_c = np.atleast_3d(arr_pad_c)
result.append(arr_pad_c)
channel_start_idx += 4
arr_pad = np.concatenate(result, axis=2)
else:
paddings_np = [(top, bottom), (left, right)] # paddings for 2d case
if arr.ndim == 3:
paddings_np.append((0, 0)) # add paddings for 3d case
if mode == "constant":
arr_pad = np.pad(arr, paddings_np, mode=mode, constant_values=cval)
elif mode == "linear_ramp":
arr_pad = np.pad(arr, paddings_np, mode=mode, end_values=cval)
else:
arr_pad = np.pad(arr, paddings_np, mode=mode)
return arr_pad
return np.copy(arr)
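# Example (illustrative, not from the library docs): pad a 2x2 uint8 image by one
# pixel on every side with zeros; the result has shape (4, 4).
# pad(np.zeros((2, 2), dtype=np.uint8), top=1, right=1, bottom=1, left=1).shape -> (4, 4)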
# TODO allow shape as input instead of array
def compute_paddings_for_aspect_ratio(arr, aspect_ratio):
"""
Compute the amount of pixels by which an array has to be padded to fulfill an aspect ratio.
The aspect ratio is given as width/height.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array for which to compute pad amounts.
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
Returns
-------
result : tuple of int
Required padding amounts to reach the target aspect ratio, given as a tuple
of the form ``(top, right, bottom, left)``.
"""
do_assert(arr.ndim in [2, 3])
do_assert(aspect_ratio > 0)
height, width = arr.shape[0:2]
do_assert(height > 0)
aspect_ratio_current = width / height
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
if aspect_ratio_current < aspect_ratio:
# vertical image, height > width
diff = (aspect_ratio * height) - width
pad_right = int(np.ceil(diff / 2))
pad_left = int(np.floor(diff / 2))
elif aspect_ratio_current > aspect_ratio:
# horizontal image, width > height
diff = ((1/aspect_ratio) * width) - height
pad_top = int(np.floor(diff / 2))
pad_bottom = int(np.ceil(diff / 2))
return pad_top, pad_right, pad_bottom, pad_left
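# Example (illustrative, not from the library docs): a 4x2 (HxW) array padded to a
# 1.0 aspect ratio needs one extra column on each side.
# compute_paddings_for_aspect_ratio(np.zeros((4, 2)), 1.0) -> (0, 1, 0, 1)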
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False):
"""
Pad an image-like array on its sides so that it matches a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
dtype support::
See :func:`imgaug.imgaug.pad`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pad.
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
return_pad_amounts : bool, optional
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
arr_padded : (H',W') ndarray or (H',W',C) ndarray
Padded image as (H',W') or (H',W',C) ndarray, fulfilling the given aspect_ratio.
tuple of int
Amounts by which the image was padded on each side, given as a tuple ``(top, right, bottom, left)``.
This tuple is only returned if `return_pad_amounts` was set to True.
Otherwise only ``arr_padded`` is returned.
"""
pad_top, pad_right, pad_bottom, pad_left = compute_paddings_for_aspect_ratio(arr, aspect_ratio)
arr_padded = pad(
arr,
top=pad_top,
right=pad_right,
bottom=pad_bottom,
left=pad_left,
mode=mode,
cval=cval
)
if return_pad_amounts:
return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)
else:
return arr_padded
def pool(arr, block_size, func, cval=0, preserve_dtype=True):
"""
Resize an array by pooling values within blocks.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested (2)
* ``uint64``: no (1)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested (2)
* ``int64``: no (1)
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: yes; tested (2)
* ``bool``: yes; tested
- (1) results too inaccurate (at least when using np.average as func)
- (2) Note that scikit-image documentation says that the wrapped pooling function converts
inputs to float64. Actual tests showed no indication of that happening (at least when
using preserve_dtype=True).
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. Ideally of datatype ``numpy.float64``.
block_size : int or tuple of int
Spatial size of each group of values to pool, aka kernel size.
If a single integer, then a symmetric block of that size along height and width will be used.
If a tuple of two values, it is assumed to be the block size along height and width of the image-like,
with pooling happening per channel.
If a tuple of three values, it is assumed to be the block size along height, width and channels.
func : callable
Function to apply to a given block in order to convert it to a single number,
e.g. :func:`numpy.average`, :func:`numpy.min`, :func:`numpy.max`.
cval : number, optional
Value to use in order to pad the array along its border if the array cannot be divided
by `block_size` without remainder.
preserve_dtype : bool, optional
Whether to convert the array back to the input datatype if it is changed away from
that in the pooling process.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after pooling.
"""
# TODO find better way to avoid circular import
from . import dtypes as iadt
iadt.gate_dtypes(arr,
allowed=["bool", "uint8", "uint16", "uint32", "int8", "int16", "int32",
"float16", "float32", "float64", "float128"],
disallowed=["uint64", "uint128", "uint256", "int64", "int128", "int256",
"float256"],
augmenter=None)
do_assert(arr.ndim in [2, 3])
is_valid_int = is_single_integer(block_size) and block_size >= 1
    is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] \
        and all([is_single_integer(val) and val >= 1 for val in block_size])
do_assert(is_valid_int or is_valid_tuple)
if is_single_integer(block_size):
block_size = [block_size, block_size]
if len(block_size) < arr.ndim:
block_size = list(block_size) + [1]
input_dtype = arr.dtype
arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func, cval=cval)
if preserve_dtype and arr_reduced.dtype.type != input_dtype:
arr_reduced = arr_reduced.astype(input_dtype)
return arr_reduced
def avg_pool(arr, block_size, cval=0, preserve_dtype=True):
"""
Resize an array using average pooling.
dtype support::
See :func:`imgaug.imgaug.pool`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. See :func:`imgaug.pool` for details.
    block_size : int or tuple of int
Size of each block of values to pool. See :func:`imgaug.pool` for details.
cval : number, optional
Padding value. See :func:`imgaug.pool` for details.
preserve_dtype : bool, optional
Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after average pooling.
"""
return pool(arr, block_size, np.average, cval=cval, preserve_dtype=preserve_dtype)
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
"""
Resize an array using max-pooling.
dtype support::
See :func:`imgaug.imgaug.pool`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. See :func:`imgaug.pool` for details.
    block_size : int or tuple of int
        Size of each block of values to pool. See :func:`imgaug.pool` for details.
cval : number, optional
Padding value. See :func:`imgaug.pool` for details.
preserve_dtype : bool, optional
Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after max-pooling.
"""
return pool(arr, block_size, np.max, cval=cval, preserve_dtype=preserve_dtype)
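# Usage sketch for the pooling helpers (illustrative, not part of the original module):
#
#   >>> import numpy as np
#   >>> arr = np.arange(16, dtype=np.uint8).reshape(4, 4)
#   >>> avg_pool(arr, 2).shape                 # 2x2 blocks -> output of shape (2, 2)
#   (2, 2)
#   >>> int(max_pool(arr, 2)[0, 0])            # max over the top-left block [0, 1, 4, 5]
#   5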
def draw_grid(images, rows=None, cols=None):
"""
Converts multiple input images into a single image showing them in a grid.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; fully tested
* ``uint32``: yes; fully tested
* ``uint64``: yes; fully tested
* ``int8``: yes; fully tested
* ``int16``: yes; fully tested
* ``int32``: yes; fully tested
* ``int64``: yes; fully tested
* ``float16``: yes; fully tested
* ``float32``: yes; fully tested
* ``float64``: yes; fully tested
* ``float128``: yes; fully tested
* ``bool``: yes; fully tested
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
The input images to convert to a grid.
rows : None or int, optional
The number of rows to show in the grid.
If None, it will be automatically derived.
cols : None or int, optional
The number of cols to show in the grid.
If None, it will be automatically derived.
Returns
-------
grid : (H',W',3) ndarray
Image of the generated grid.
"""
nb_images = len(images)
do_assert(nb_images > 0)
if is_np_array(images):
do_assert(images.ndim == 4)
else:
do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)
dts = [image.dtype.name for image in images]
nb_dtypes = len(set(dts))
do_assert(nb_dtypes == 1, ("All images provided to draw_grid() must have the same dtype, "
+ "found %d dtypes (%s)") % (nb_dtypes, ", ".join(dts)))
cell_height = max([image.shape[0] for image in images])
cell_width = max([image.shape[1] for image in images])
channels = set([image.shape[2] for image in images])
do_assert(
len(channels) == 1,
"All images are expected to have the same number of channels, "
+ "but got channel set %s with length %d instead." % (str(channels), len(channels))
)
nb_channels = list(channels)[0]
if rows is None and cols is None:
rows = cols = int(math.ceil(math.sqrt(nb_images)))
elif rows is not None:
cols = int(math.ceil(nb_images / rows))
elif cols is not None:
rows = int(math.ceil(nb_images / cols))
do_assert(rows * cols >= nb_images)
width = cell_width * cols
height = cell_height * rows
dt = images.dtype if is_np_array(images) else images[0].dtype
grid = np.zeros((height, width, nb_channels), dtype=dt)
cell_idx = 0
for row_idx in sm.xrange(rows):
for col_idx in sm.xrange(cols):
if cell_idx < nb_images:
image = images[cell_idx]
cell_y1 = cell_height * row_idx
cell_y2 = cell_y1 + image.shape[0]
cell_x1 = cell_width * col_idx
cell_x2 = cell_x1 + image.shape[1]
grid[cell_y1:cell_y2, cell_x1:cell_x2, :] = image
cell_idx += 1
return grid
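# Usage sketch (illustrative, not part of the original module):
#
#   >>> import numpy as np
#   >>> images = [np.zeros((8, 8, 3), dtype=np.uint8) for _ in range(4)]
#   >>> draw_grid(images, rows=2, cols=2).shape    # 2x2 grid of 8x8 cells
#   (16, 16, 3)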
def show_grid(images, rows=None, cols=None):
"""
Converts the input images to a grid image and shows it in a new window.
dtype support::
minimum of (
:func:`imgaug.imgaug.draw_grid`,
:func:`imgaug.imgaug.imshow`
)
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
See :func:`imgaug.draw_grid`.
rows : None or int, optional
See :func:`imgaug.draw_grid`.
cols : None or int, optional
See :func:`imgaug.draw_grid`.
"""
grid = draw_grid(images, rows=rows, cols=cols)
imshow(grid)
def imshow(image, backend=IMSHOW_BACKEND_DEFAULT):
"""
Shows an image in a window.
dtype support::
* ``uint8``: yes; not tested
* ``uint16``: ?
* ``uint32``: ?
* ``uint64``: ?
* ``int8``: ?
* ``int16``: ?
* ``int32``: ?
* ``int64``: ?
* ``float16``: ?
* ``float32``: ?
* ``float64``: ?
* ``float128``: ?
* ``bool``: ?
Parameters
----------
image : (H,W,3) ndarray
Image to show.
backend : {'matplotlib', 'cv2'}, optional
Library to use to show the image. May be either matplotlib or OpenCV ('cv2').
OpenCV tends to be faster, but apparently causes more technical issues.
"""
do_assert(backend in ["matplotlib", "cv2"], "Expected backend 'matplotlib' or 'cv2', got %s." % (backend,))
if backend == "cv2":
image_bgr = image
if image.ndim == 3 and image.shape[2] in [3, 4]:
image_bgr = image[..., 0:3][..., ::-1]
win_name = "imgaug-default-window"
cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
cv2.imshow(win_name, image_bgr)
cv2.waitKey(0)
cv2.destroyWindow(win_name)
else:
# import only when necessary (faster startup; optional dependency; less fragile -- see issue #225)
import matplotlib.pyplot as plt
dpi = 96
h, w = image.shape[0] / dpi, image.shape[1] / dpi
w = max(w, 6) # if the figure is too narrow, the footer may appear and make the fig suddenly wider (ugly)
fig, ax = plt.subplots(figsize=(w, h), dpi=dpi)
fig.canvas.set_window_title("imgaug.imshow(%s)" % (image.shape,))
        ax.imshow(image, cmap="gray")  # cmap only takes effect for grayscale images
plt.show()
def do_assert(condition, message="Assertion failed."):
"""
    Function that behaves like an ``assert`` statement, but raises an
    AssertionError.
This is added because `assert` statements are removed in optimized code.
It replaces `assert` statements throughout the library that should be
kept even in optimized code.
Parameters
----------
condition : bool
If False, an exception is raised.
message : str, optional
Error message.
"""
if not condition:
raise AssertionError(str(message))
class HooksImages(object):
"""
Class to intervene with image augmentation runs.
This is e.g. useful to dynamically deactivate some augmenters.
Parameters
----------
activator : None or callable, optional
A function that gives permission to execute an augmenter.
The expected interface is ``f(images, augmenter, parents, default)``,
where ``images`` are the input images to augment, ``augmenter`` is the
instance of the augmenter to execute, ``parents`` are previously
executed augmenters and ``default`` is an expected default value to be
returned if the activator function does not plan to make a decision
for the given inputs.
propagator : None or callable, optional
A function that gives permission to propagate the augmentation further
to the children of an augmenter. This happens after the activator.
In theory, an augmenter may augment images itself (if allowed by the
activator) and then execute child augmenters afterwards (if allowed by
the propagator). If the activator returned False, the propagation step
will never be executed.
The expected interface is ``f(images, augmenter, parents, default)``,
with all arguments having identical meaning to the activator.
preprocessor : None or callable, optional
A function to call before an augmenter performed any augmentations.
The interface is ``f(images, augmenter, parents)``,
with all arguments having identical meaning to the activator.
It is expected to return the input images, optionally modified.
postprocessor : None or callable, optional
A function to call after an augmenter performed augmentations.
The interface is the same as for the preprocessor.
Examples
--------
>>> seq = iaa.Sequential([
>>> iaa.GaussianBlur(3.0, name="blur"),
>>> iaa.Dropout(0.05, name="dropout"),
>>> iaa.Affine(translate_px=-5, name="affine")
>>> ])
>>> images = [np.zeros((10, 10), dtype=np.uint8)]
>>>
>>> def activator(images, augmenter, parents, default):
>>> return False if augmenter.name in ["blur", "dropout"] else default
>>>
>>> seq_det = seq.to_deterministic()
>>> images_aug = seq_det.augment_images(images)
>>> heatmaps = [np.random.rand(*(3, 10, 10))]
>>> heatmaps_aug = seq_det.augment_images(
>>> heatmaps,
>>> hooks=ia.HooksImages(activator=activator)
>>> )
This augments images and their respective heatmaps in the same way.
The heatmaps however are only modified by Affine, not by GaussianBlur or
Dropout.
"""
def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
self.activator = activator
self.propagator = propagator
self.preprocessor = preprocessor
self.postprocessor = postprocessor
def is_activated(self, images, augmenter, parents, default):
"""
Returns whether an augmenter may be executed.
Returns
-------
bool
If True, the augmenter may be executed. If False, it may not be executed.
"""
if self.activator is None:
return default
else:
return self.activator(images, augmenter, parents, default)
def is_propagating(self, images, augmenter, parents, default):
"""
Returns whether an augmenter may call its children to augment an
        image. This is independent of the augmenter itself possibly changing
        the image without calling its children. (Most, if not all, augmenters with
        children currently don't perform any changes themselves.)
Returns
-------
bool
            If True, the augmentation may be propagated to the augmenter's children. If False, it may not.
"""
if self.propagator is None:
return default
else:
return self.propagator(images, augmenter, parents, default)
def preprocess(self, images, augmenter, parents):
"""
A function to be called before the augmentation of images starts (per augmenter).
Returns
-------
(N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The input images, optionally modified.
"""
if self.preprocessor is None:
return images
else:
return self.preprocessor(images, augmenter, parents)
def postprocess(self, images, augmenter, parents):
"""
A function to be called after the augmentation of images was
performed.
Returns
-------
(N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The input images, optionally modified.
"""
if self.postprocessor is None:
return images
else:
return self.postprocessor(images, augmenter, parents)
class HooksHeatmaps(HooksImages):
"""
Class to intervene with heatmap augmentation runs.
This is e.g. useful to dynamically deactivate some augmenters.
This class is currently the same as the one for images. This may or may
not change in the future.
"""
pass
class HooksKeypoints(HooksImages):
"""
Class to intervene with keypoint augmentation runs.
This is e.g. useful to dynamically deactivate some augmenters.
This class is currently the same as the one for images. This may or may
not change in the future.
"""
pass
def compute_geometric_median(X, eps=1e-5):
"""
Estimate the geometric median of points in 2D.
Code from https://stackoverflow.com/a/30305181
Parameters
----------
X : (N,2) ndarray
Points in 2D. Second axis must be given in xy-form.
eps : float, optional
Distance threshold when to return the median.
Returns
-------
(2,) ndarray
Geometric median as xy-coordinate.
"""
y = np.mean(X, 0)
while True:
D = scipy.spatial.distance.cdist(X, [y])
nonzeros = (D != 0)[:, 0]
Dinv = 1 / D[nonzeros]
Dinvs = np.sum(Dinv)
W = Dinv / Dinvs
T = np.sum(W * X[nonzeros], 0)
num_zeros = len(X) - np.sum(nonzeros)
if num_zeros == 0:
y1 = T
elif num_zeros == len(X):
return y
else:
R = (T - y) * Dinvs
r = np.linalg.norm(R)
rinv = 0 if r == 0 else num_zeros/r
y1 = max(0, 1-rinv)*T + min(1, rinv)*y
if scipy.spatial.distance.euclidean(y, y1) < eps:
return y1
y = y1
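# Usage sketch (illustrative, not part of the original module):
#
#   >>> import numpy as np
#   >>> X = np.float32([[0, 0], [0, 2], [2, 0], [2, 2]])
#   >>> compute_geometric_median(X)    # symmetric point cloud -> its center
#   array([1., 1.])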
class Keypoint(object):
"""
A single keypoint (aka landmark) on an image.
Parameters
----------
x : number
Coordinate of the keypoint on the x axis.
y : number
Coordinate of the keypoint on the y axis.
"""
def __init__(self, x, y):
self.x = x
self.y = y
@property
def x_int(self):
"""
Return the keypoint's x-coordinate, rounded to the closest integer.
Returns
-------
result : int
Keypoint's x-coordinate, rounded to the closest integer.
"""
return int(np.round(self.x))
@property
def y_int(self):
"""
Return the keypoint's y-coordinate, rounded to the closest integer.
Returns
-------
result : int
Keypoint's y-coordinate, rounded to the closest integer.
"""
return int(np.round(self.y))
def project(self, from_shape, to_shape):
"""
Project the keypoint onto a new position on a new image.
E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
and y=(20 of 100 pixels) and is projected onto a new image with
size (width=200, height=200), its new position will be (20, 40).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
imgaug.Keypoint
Keypoint object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return Keypoint(x=self.x, y=self.y)
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
x = (self.x / from_width) * to_width
y = (self.y / from_height) * to_height
return Keypoint(x=x, y=y)
def shift(self, x=0, y=0):
"""
Move the keypoint around on an image.
Parameters
----------
x : number, optional
Move by this value on the x axis.
y : number, optional
Move by this value on the y axis.
Returns
-------
imgaug.Keypoint
Keypoint object with new coordinates.
"""
return Keypoint(self.x + x, self.y + y)
def generate_similar_points_manhattan(self, nb_steps, step_size, return_array=False):
"""
Generate nearby points to this keypoint based on manhattan distance.
To generate the first neighbouring points, a distance of S (step size) is moved from the
center point (this keypoint) to the top, right, bottom and left, resulting in four new
points. From these new points, the pattern is repeated. Overlapping points are ignored.
The resulting points have a shape similar to a square rotated by 45 degrees.
Parameters
----------
nb_steps : int
The number of steps to move from the center point. nb_steps=1 results in a total of
5 output points (1 center point + 4 neighbours).
step_size : number
The step size to move from every point to its neighbours.
return_array : bool, optional
Whether to return the generated points as a list of keypoints or an array
of shape ``(N,2)``, where ``N`` is the number of generated points and the second axis contains
the x- (first value) and y- (second value) coordinates.
Returns
-------
points : list of imgaug.Keypoint or (N,2) ndarray
If return_array was False, then a list of Keypoint.
Otherwise a numpy array of shape ``(N,2)``, where ``N`` is the number of generated points and
the second axis contains the x- (first value) and y- (second value) coordinates.
The center keypoint (the one on which this function was called) is always included.
"""
# TODO add test
        # Points generated in manhattan style with S steps have a shape similar to a 45deg rotated
# square. The center line with the origin point has S+1+S = 1+2*S points (S to the left,
# S to the right). The lines above contain (S+1+S)-2 + (S+1+S)-2-2 + ... + 1 points. E.g.
# for S=2 it would be 3+1=4 and for S=3 it would be 5+3+1=9. Same for the lines below the
# center. Hence the total number of points is S+1+S + 2*(S^2).
points = np.zeros((nb_steps + 1 + nb_steps + 2*(nb_steps**2), 2), dtype=np.float32)
# we start at the bottom-most line and move towards the top-most line
yy = np.linspace(self.y - nb_steps * step_size, self.y + nb_steps * step_size, nb_steps + 1 + nb_steps)
# bottom-most line contains only one point
width = 1
nth_point = 0
for i_y, y in enumerate(yy):
if width == 1:
xx = [self.x]
else:
xx = np.linspace(self.x - (width-1)//2 * step_size, self.x + (width-1)//2 * step_size, width)
for x in xx:
points[nth_point] = [x, y]
nth_point += 1
if i_y < nb_steps:
width += 2
else:
width -= 2
if return_array:
return points
return [Keypoint(x=points[i, 0], y=points[i, 1]) for i in sm.xrange(points.shape[0])]
def __repr__(self):
return self.__str__()
def __str__(self):
return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
"""
Object that represents all keypoints on a single image.
Parameters
----------
keypoints : list of imgaug.Keypoint
List of keypoints on the image.
shape : tuple of int
The shape of the image on which the keypoints are placed.
Examples
--------
>>> image = np.zeros((70, 70))
>>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
>>> kps_oi = KeypointsOnImage(kps, shape=image.shape)
"""
def __init__(self, keypoints, shape):
self.keypoints = keypoints
if is_np_array(shape):
self.shape = shape.shape
else:
do_assert(isinstance(shape, (tuple, list)))
self.shape = tuple(shape)
@property
def height(self):
return self.shape[0]
@property
def width(self):
return self.shape[1]
@property
def empty(self):
"""
Returns whether this object contains zero keypoints.
Returns
-------
result : bool
True if this object contains zero keypoints.
"""
return len(self.keypoints) == 0
def on(self, image):
"""
Project keypoints from one image to a new one.
Parameters
----------
image : ndarray or tuple of int
New image onto which the keypoints are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
keypoints : imgaug.KeypointsOnImage
Object containing all projected keypoints.
"""
if is_np_array(image):
shape = image.shape
else:
shape = image
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
else:
keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
return KeypointsOnImage(keypoints, shape)
def draw_on_image(self, image, color=(0, 255, 0), size=3, copy=True, raise_if_out_of_image=False):
"""
Draw all keypoints onto a given image. Each keypoint is marked by a square of a chosen color and size.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the keypoints.
This image should usually have the same shape as
set in KeypointsOnImage.shape.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of all keypoints. If a single int ``C``, then that is
equivalent to ``(C,C,C)``.
size : int, optional
The size of each point. If set to ``C``, each square will have size ``C x C``.
copy : bool, optional
Whether to copy the image before drawing the points.
raise_if_out_of_image : bool, optional
Whether to raise an exception if any keypoint is outside of the image.
Returns
-------
image : (H,W,3) ndarray
Image with drawn keypoints.
"""
if copy:
image = np.copy(image)
height, width = image.shape[0:2]
for keypoint in self.keypoints:
y, x = keypoint.y_int, keypoint.x_int
if 0 <= y < height and 0 <= x < width:
x1 = max(x - size//2, 0)
x2 = min(x + 1 + size//2, width)
y1 = max(y - size//2, 0)
y2 = min(y + 1 + size//2, height)
image[y1:y2, x1:x2] = color
else:
if raise_if_out_of_image:
raise Exception("Cannot draw keypoint x=%.8f, y=%.8f on image with shape %s." % (y, x, image.shape))
return image
def shift(self, x=0, y=0):
"""
Move the keypoints around on an image.
Parameters
----------
x : number, optional
Move each keypoint by this value on the x axis.
y : number, optional
Move each keypoint by this value on the y axis.
Returns
-------
out : KeypointsOnImage
Keypoints after moving them.
"""
keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
return KeypointsOnImage(keypoints, self.shape)
def get_coords_array(self):
"""
Convert the coordinates of all keypoints in this object to an array of shape (N,2).
Returns
-------
result : (N, 2) ndarray
Where N is the number of keypoints. Each first value is the
x coordinate, each second value is the y coordinate.
"""
result = np.zeros((len(self.keypoints), 2), np.float32)
for i, keypoint in enumerate(self.keypoints):
result[i, 0] = keypoint.x
result[i, 1] = keypoint.y
return result
@staticmethod
def from_coords_array(coords, shape):
"""
Convert an array (N,2) with a given image shape to a KeypointsOnImage object.
Parameters
----------
coords : (N, 2) ndarray
Coordinates of ``N`` keypoints on the original image.
Each first entry ``coords[i, 0]`` is expected to be the x coordinate.
Each second entry ``coords[i, 1]`` is expected to be the y coordinate.
shape : tuple
Shape tuple of the image on which the keypoints are placed.
Returns
-------
out : KeypointsOnImage
KeypointsOnImage object that contains all keypoints from the array.
"""
keypoints = [Keypoint(x=coords[i, 0], y=coords[i, 1]) for i in sm.xrange(coords.shape[0])]
return KeypointsOnImage(keypoints, shape)
# TODO add to_gaussian_heatmaps(), from_gaussian_heatmaps()
def to_keypoint_image(self, size=1):
"""
Draws a new black image of shape ``(H,W,N)`` in which all keypoint coordinates are set to 255.
(H=shape height, W=shape width, N=number of keypoints)
This function can be used as a helper when augmenting keypoints with a method that only supports the
augmentation of images.
Parameters
        ----------
size : int
Size of each (squared) point.
Returns
-------
image : (H,W,N) ndarray
Image in which the keypoints are marked. H is the height,
defined in KeypointsOnImage.shape[0] (analogous W). N is the
number of keypoints.
"""
do_assert(len(self.keypoints) > 0)
height, width = self.shape[0:2]
image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
do_assert(size % 2 != 0)
sizeh = max(0, (size-1)//2)
for i, keypoint in enumerate(self.keypoints):
# TODO for float values spread activation over several cells
# here and do voting at the end
y = keypoint.y_int
x = keypoint.x_int
x1 = np.clip(x - sizeh, 0, width-1)
x2 = np.clip(x + sizeh + 1, 0, width)
y1 = np.clip(y - sizeh, 0, height-1)
y2 = np.clip(y + sizeh + 1, 0, height)
if x1 < x2 and y1 < y2:
image[y1:y2, x1:x2, i] = 128
if 0 <= y < height and 0 <= x < width:
image[y, x, i] = 255
return image
@staticmethod
def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
"""
Converts an image generated by ``to_keypoint_image()`` back to a KeypointsOnImage object.
Parameters
----------
image : (H,W,N) ndarray
The keypoints image. N is the number of keypoints.
if_not_found_coords : tuple or list or dict or None, optional
Coordinates to use for keypoints that cannot be found in `image`.
If this is a list/tuple, it must have two integer values.
If it is a dictionary, it must have the keys ``x`` and ``y`` with
each containing one integer value.
If this is None, then the keypoint will not be added to the final
KeypointsOnImage object.
threshold : int, optional
The search for keypoints works by searching for the argmax in
            each channel. This parameter contains the minimum value that
the max must have in order to be viewed as a keypoint.
nb_channels : None or int, optional
Number of channels of the image on which the keypoints are placed.
Some keypoint augmenters require that information.
If set to None, the keypoint's shape will be set
to ``(height, width)``, otherwise ``(height, width, nb_channels)``.
Returns
-------
out : KeypointsOnImage
The extracted keypoints.
"""
do_assert(len(image.shape) == 3)
height, width, nb_keypoints = image.shape
drop_if_not_found = False
if if_not_found_coords is None:
drop_if_not_found = True
if_not_found_x = -1
if_not_found_y = -1
elif isinstance(if_not_found_coords, (tuple, list)):
do_assert(len(if_not_found_coords) == 2)
if_not_found_x = if_not_found_coords[0]
if_not_found_y = if_not_found_coords[1]
elif isinstance(if_not_found_coords, dict):
if_not_found_x = if_not_found_coords["x"]
if_not_found_y = if_not_found_coords["y"]
else:
raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
type(if_not_found_coords),))
keypoints = []
for i in sm.xrange(nb_keypoints):
maxidx_flat = np.argmax(image[..., i])
maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
if found:
keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
else:
if drop_if_not_found:
pass # dont add the keypoint to the result list, i.e. drop it
else:
keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
out_shape = (height, width)
if nb_channels is not None:
out_shape += (nb_channels,)
return KeypointsOnImage(keypoints, shape=out_shape)
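    # Round-trip sketch for the keypoint-image helpers (illustrative, not part of
    # the original module):
    #
    #   >>> import numpy as np
    #   >>> kps = KeypointsOnImage([Keypoint(x=3, y=5)], shape=(10, 10))
    #   >>> heat = kps.to_keypoint_image(size=1)     # (10, 10, 1) uint8, 255 at row 5, col 3
    #   >>> kps2 = KeypointsOnImage.from_keypoint_image(heat)
    #   >>> kps2.keypoints[0].x_int, kps2.keypoints[0].y_int
    #   (3, 5)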
def to_distance_maps(self, inverted=False):
"""
Generates a ``(H,W,K)`` output containing ``K`` distance maps for ``K`` keypoints.
The k-th distance map contains at every location ``(y, x)`` the euclidean distance to the k-th keypoint.
This function can be used as a helper when augmenting keypoints with a method that only supports
the augmentation of images.
Parameters
        ----------
inverted : bool, optional
If True, inverted distance maps are returned where each distance value d is replaced
by ``d/(d+1)``, i.e. the distance maps have values in the range ``(0.0, 1.0]`` with 1.0
denoting exactly the position of the respective keypoint.
Returns
-------
distance_maps : (H,W,K) ndarray
A ``float32`` array containing ``K`` distance maps for ``K`` keypoints. Each location
``(y, x, k)`` in the array denotes the euclidean distance at ``(y, x)`` to the ``k``-th keypoint.
In inverted mode the distance ``d`` is replaced by ``d/(d+1)``. The height and width
of the array match the height and width in ``KeypointsOnImage.shape``.
"""
do_assert(len(self.keypoints) > 0)
height, width = self.shape[0:2]
distance_maps = np.zeros((height, width, len(self.keypoints)), dtype=np.float32)
yy = np.arange(0, height)
xx = np.arange(0, width)
grid_xx, grid_yy = np.meshgrid(xx, yy)
for i, keypoint in enumerate(self.keypoints):
y, x = keypoint.y, keypoint.x
distance_maps[:, :, i] = (grid_xx - x) ** 2 + (grid_yy - y) ** 2
distance_maps = np.sqrt(distance_maps)
if inverted:
return 1/(distance_maps+1)
return distance_maps
# TODO add option to if_not_found_coords to reuse old keypoint coords
@staticmethod
def from_distance_maps(distance_maps, inverted=False, if_not_found_coords={"x": -1, "y": -1}, threshold=None, # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
nb_channels=None):
"""
Converts maps generated by ``to_distance_maps()`` back to a KeypointsOnImage object.
Parameters
----------
distance_maps : (H,W,N) ndarray
The distance maps. N is the number of keypoints.
inverted : bool, optional
Whether the given distance maps were generated in inverted or normal mode.
if_not_found_coords : tuple or list or dict or None, optional
Coordinates to use for keypoints that cannot be found in ``distance_maps``.
If this is a list/tuple, it must have two integer values.
If it is a dictionary, it must have the keys ``x`` and ``y``, with each
containing one integer value.
If this is None, then the keypoint will not be added to the final
KeypointsOnImage object.
threshold : float, optional
The search for keypoints works by searching for the argmin (non-inverted) or
            argmax (inverted) in each channel. This parameter contains the maximum (non-inverted)
            or minimum (inverted) value to accept in order to view a hit as a keypoint.
            Use ``None`` to not apply any threshold.
nb_channels : None or int, optional
Number of channels of the image on which the keypoints are placed.
Some keypoint augmenters require that information.
If set to None, the keypoint's shape will be set
to ``(height, width)``, otherwise ``(height, width, nb_channels)``.
Returns
-------
imgaug.KeypointsOnImage
The extracted keypoints.
"""
do_assert(len(distance_maps.shape) == 3)
height, width, nb_keypoints = distance_maps.shape
drop_if_not_found = False
if if_not_found_coords is None:
drop_if_not_found = True
if_not_found_x = -1
if_not_found_y = -1
elif isinstance(if_not_found_coords, (tuple, list)):
do_assert(len(if_not_found_coords) == 2)
if_not_found_x = if_not_found_coords[0]
if_not_found_y = if_not_found_coords[1]
elif isinstance(if_not_found_coords, dict):
if_not_found_x = if_not_found_coords["x"]
if_not_found_y = if_not_found_coords["y"]
else:
raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
type(if_not_found_coords),))
keypoints = []
for i in sm.xrange(nb_keypoints):
# TODO introduce voting here among all distance values that have min/max values
if inverted:
hitidx_flat = np.argmax(distance_maps[..., i])
else:
hitidx_flat = np.argmin(distance_maps[..., i])
            hitidx_ndim = np.unravel_index(hitidx_flat, (height, width))
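            # Sketch of the remaining steps (an assumption; mirrors from_keypoint_image()
            # and the docstring above: threshold check, fallback coordinates, output shape).
            if threshold is not None:
                value = distance_maps[hitidx_ndim[0], hitidx_ndim[1], i]
                found = (value >= threshold) if inverted else (value <= threshold)
            else:
                found = True
            if found:
                keypoints.append(Keypoint(x=hitidx_ndim[1], y=hitidx_ndim[0]))
            elif not drop_if_not_found:
                keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)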
#!/usr/bin/env python
# wujian@2019
"""
Sound Source Localization (SSL) Module
"""
import numpy as np
from .utils import cmat_abs
def ml_ssl(stft, sv, compression=0, eps=1e-8, norm=False, mask=None):
"""
Maximum likelihood SSL
Arguments:
stft: STFT transform result, M x T x F
        sv: steering vector for each direction, A x M x F
        norm: whether to normalize the STFT to unit magnitude or not
        mask: TF-mask of the source(s), T x F or N x T x F
Return:
index: DoA index
"""
_, T, F = stft.shape
if mask is None:
mask = np.ones([T, F])
# make sure sv is normalized
sv = sv / np.linalg.norm(sv, axis=1, keepdims=True)
if norm:
stft = stft / np.maximum(cmat_abs(stft), eps)
ssh_cor = np.abs(np.einsum("mtf,mtf->tf", stft, stft.conj()))
ssv_cor = np.abs(np.einsum("amf,mtf->atf", sv, stft.conj()))**2
# A x T x F
delta = ssh_cor[None, ...] - ssv_cor / (1 + eps)
if compression <= 0:
tf_loglike = -np.log(np.maximum(delta, eps))
else:
tf_loglike = -np.power(delta, compression)
# masking
if mask.ndim == 2:
loglike = np.sum(mask[None, ...] * tf_loglike, (1, 2))
else:
loglike = np.einsum("ntf,atf->na", mask, tf_loglike)
return np.argmax(loglike, axis=-1)
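# Usage sketch (illustrative; shapes follow the docstring, values are random):
#
#   >>> import numpy as np
#   >>> M, T, F, A = 4, 50, 257, 36    # mics, frames, freq bins, candidate directions
#   >>> stft = np.random.randn(M, T, F) + 1j * np.random.randn(M, T, F)
#   >>> sv = np.exp(1j * np.random.uniform(0, 2 * np.pi, (A, M, F)))
#   >>> doa = ml_ssl(stft, sv, norm=True)    # index into the A candidate directions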
def srp_ssl(stft, sv, srp_pair=None, mask=None):
"""
Do SRP-PHAT based SSL
Arguments:
stft: STFT transform result, M x T x F
        sv: steering vector for each direction, A x M x F
        srp_pair: pair of microphone index lists used to compute the SRP response
        mask: TF-mask of the source, T x F
Return:
index: DoA index
"""
if srp_pair is None:
raise ValueError("srp_pair cannot be None, (list, list)")
_, T, F = stft.shape
if mask is None:
mask = np.ones([T, F])
index_l, index_r = srp_pair
# M x T x F
obs_pha = np.angle(stft)
# A x M x F
ora_pha = np.angle(sv)
# observed ipd: P x T x F
obs_ipd = obs_pha[index_l] - obs_pha[index_r]
# oracle ipd: A x P x F
ora_ipd = ora_pha[:, index_l] - ora_pha[:, index_r]
# directional feature: A x P x T x F
    af = np.cos(obs_ipd[None, ...] - ora_ipd[..., None, :])
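    # Sketch of the remaining steps (an assumption; mirrors the masking logic of
    # ml_ssl above): average the pair-wise cosine similarity and pick the
    # direction with the largest masked SRP response.
    # A x T x F
    srp = np.mean(af, axis=1)
    response = np.sum(mask[None, ...] * srp, (1, 2))
    return np.argmax(response, axis=-1)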
import cv2 as cv
import shapely.geometry as sg
import numpy as np
import time
import os
from lxml import etree
MIP36h12 = [0xd2b63a09d, 0x6001134e5, 0x1206fbe72, 0xff8ad6cb4, 0x85da9bc49, 0xb461afe9c,
0x6db51fe13, 0x5248c541f, 0x8f34503, 0x8ea462ece, 0xeac2be76d, 0x1af615c44,
0xb48a49f27, 0x2e4e1283b, 0x78b1f2fa8, 0x27d34f57e, 0x89222fff1, 0x4c1669406,
0xbf49b3511, 0xdc191cd5d, 0x11d7c3f85, 0x16a130e35, 0xe29f27eff, 0x428d8ae0c,
0x90d548477, 0x2319cbc93, 0xc3b0c3dfc, 0x424bccc9, 0x2a081d630, 0x762743d96,
0xd0645bf19, 0xf38d7fd60, 0xc6cbf9a10, 0x3c1be7c65, 0x276f75e63, 0x4490a3f63,
0xda60acd52, 0x3cc68df59, 0xab46f9dae, 0x88d533d78, 0xb6d62ec21, 0xb3c02b646,
0x22e56d408, 0xac5f5770a, 0xaaa993f66, 0x4caa07c8d, 0x5c9b4f7b0, 0xaa9ef0e05,
0x705c5750, 0xac81f545e, 0x735b91e74, 0x8cc35cee4, 0xe44694d04, 0xb5e121de0,
0x261017d0f, 0xf1d439eb5, 0xa1a33ac96, 0x174c62c02, 0x1ee27f716, 0x8b1c5ece9,
0x6a05b0c6a, 0xd0568dfc, 0x192d25e5f, 0x1adbeccc8, 0xcfec87f00, 0xd0b9dde7a,
0x88dcef81e, 0x445681cb9, 0xdbb2ffc83, 0xa48d96df1, 0xb72cc2e7d, 0xc295b53f,
0xf49832704, 0x9968edc29, 0x9e4e1af85, 0x8683e2d1b, 0x810b45c04, 0x6ac44bfe2,
0x645346615, 0x3990bd598, 0x1c9ed0f6a, 0xc26729d65, 0x83993f795, 0x3ac05ac5d,
0x357adff3b, 0xd5c05565, 0x2f547ef44, 0x86c115041, 0x640fd9e5f, 0xce08bbcf7,
0x109bb343e, 0xc21435c92, 0x35b4dfce4, 0x459752cf2, 0xec915b82c, 0x51881eed0,
0x2dda7dc97, 0x2e0142144, 0x42e890f99, 0x9a8856527, 0x8e80d9d80, 0x891cbcf34,
0x25dd82410, 0x239551d34, 0x8fe8f0c70, 0x94106a970, 0x82609b40c, 0xfc9caf36,
0x688181d11, 0x718613c08, 0xf1ab7629, 0xa357bfc18, 0x4c03b7a46, 0x204dedce6,
0xad6300d37, 0x84cc4cd09, 0x42160e5c4, 0x87d2adfa8, 0x7850e7749, 0x4e750fc7c,
0xbf2e5dfda, 0xd88324da5, 0x234b52f80, 0x378204514, 0xabdf2ad53, 0x365e78ef9,
0x49caa6ca2, 0x3c39ddf3, 0xc68c5385d, 0x5bfcbbf67, 0x623241e21, 0xabc90d5cc,
0x388c6fe85, 0xda0e2d62d, 0x10855dfe9, 0x4d46efd6b, 0x76ea12d61, 0x9db377d3d,
0xeed0efa71, 0xe6ec3ae2f, 0x441faee83, 0xba19c8ff5, 0x313035eab, 0x6ce8f7625,
0x880dab58d, 0x8d3409e0d, 0x2be92ee21, 0xd60302c6c, 0x469ffc724, 0x87eebeed3,
0x42587ef7a, 0x7a8cc4e52, 0x76a437650, 0x999e41ef4, 0x7d0969e42, 0xc02baf46b,
0x9259f3e47, 0x2116a1dc0, 0x9f2de4d84, 0xeffac29, 0x7b371ff8c, 0x668339da9,
0xd010aee3f, 0x1cd00b4c0, 0x95070fc3b, 0xf84c9a770, 0x38f863d76, 0x3646ff045,
0xce1b96412, 0x7a5d45da8, 0x14e00ef6c, 0x5e95abfd8, 0xb2e9cb729, 0x36c47dd7,
0xb8ee97c6b, 0xe9e8f657, 0xd4ad2ef1a, 0x8811c7f32, 0x47bde7c31, 0x3adadfb64,
0x6e5b28574, 0x33e67cd91, 0x2ab9fdd2d, 0x8afa67f2b, 0xe6a28fc5e, 0x72049cdbd,
0xae65dac12, 0x1251a4526, 0x1089ab841, 0xe2f096ee0, 0xb0caee573, 0xfd6677e86,
0x444b3f518, 0xbe8b3a56a, 0x680a75cfc, 0xac02baea8, 0x97d815e1c, 0x1d4386e08,
0x1a14f5b0e, 0xe658a8d81, 0xa3868efa7, 0x3668a9673, 0xe8fc53d85, 0x2e2b7edd5,
0x8b2470f13, 0xf69795f32, 0x4589ffc8e, 0x2e2080c9c, 0x64265f7d, 0x3d714dd10,
0x1692c6ef1, 0x3e67f2f49, 0x5041dad63, 0x1a1503415, 0x64c18c742, 0xa72eec35,
0x1f0f9dc60, 0xa9559bc67, 0xf32911d0d, 0x21c0d4ffc, 0xe01cef5b0, 0x4e23a3520,
0xaa4f04e49, 0xe1c4fcc43, 0x208e8f6e8, 0x8486774a5, 0x9e98c7558, 0x2c59fb7dc,
0x9446a4613, 0x8292dcc2e, 0x4d61631, 0xd05527809, 0xa0163852d, 0x8f657f639,
0xcca6c3e37, 0xcb136bc7a, 0xfc5a83e53, 0x9aa44fc30, 0xbdec1bd3c, 0xe020b9f7c,
0x4b8f35fb0, 0xb8165f637, 0x33dc88d69, 0x10a2f7e4d, 0xc8cb5ff53, 0xde259ff6b,
0x46d070dd4, 0x32d3b9741, 0x7075f1c04, 0x4d58dbea0]
MIP36h12_set = set(MIP36h12)
def get_rand_val(val_range):
min_val = val_range[0]
max_val = val_range[1]
return np.random.rand() * (max_val - min_val) + min_val
class Polygon:
def __init__(self, np_poly):
self.np_poly = np_poly
curr_poly = []
for i in range(4):
curr_poly.append((np_poly[i, 0], np_poly[i, 1]))
self.sg_poly = sg.Polygon(curr_poly)
def intersects_with(self, polygon2):
return self.sg_poly.intersects(polygon2.sg_poly)
def get_cam_dir_rot(X, Y, Z):
    V = [X, Y, Z]  # vector going through the camera center and then through the point
V = V / np.linalg.norm(V)
Z_vec = [0, 0, 1]
axis = np.cross(Z_vec, V)
angle = np.arccos(np.dot(V, Z_vec))
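    # caveat: if V is (anti-)parallel to Z_vec, the cross product is the zero vector and
    # the normalization below would divide by zero (assumed not to occur for the sampled poses)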
rot_vec = axis / np.linalg.norm(axis) * angle
cam_dir_rot, _ = cv.Rodrigues(rot_vec)
return cam_dir_rot
def make_sure_dir_exists(dir_path):
if not os.path.exists(dir_path):
os.mkdir(dir_path)
class ImageGenerator:
def __init__(self): # physical units are in meters and radians
self.marker_corners = np.array([[-0.5, -0.5, 0], [0.5, -0.5, 0], [0.5, 0.5, 0], [-0.5, 0.5, 0]]).transpose()
self.fov_range = [np.pi / 3, np.pi / 18 * 10]
self.im_width = 640
self.im_height = 360
self.im_size = (self.im_width, self.im_height)
self.marker_size_range = [0.16, 0.32]
self.num_markers_range = [1, 11]
self.Z_range = [0.3, 1.8]
self.max_oop_rot = np.pi / 4 # out of plane (oop) maximum rotation
self.horizontal_flip_prob = 0.1
self.vertical_flip_prob = 0
self.marker_pixels = self.im_height
self.mborder_size = int(self.marker_pixels / 8)
self.mimage_size = self.marker_pixels + 2 * self.mborder_size
self.files_list = []
self.input_dataset_folder = ""
self.ids_permutation = []
self.id_perm_index = 0
self.max_color_mod = 25
def write_pascal_voc_xml(self, xml_file_path, image_name, bounding_boxes, marker_corners, marker_ids):
root = etree.Element("annotation")
etree.SubElement(root, "folder").text = "images"
etree.SubElement(root, "filename").text = image_name
etree.SubElement(root, "path").text = ""
source = etree.SubElement(root, "source")
etree.SubElement(source, "database").text = "Unknown"
size = etree.SubElement(root, "size")
etree.SubElement(size, "width").text = str(self.im_width)
etree.SubElement(size, "height").text = str(self.im_height)
etree.SubElement(size, "depth").text = "3"
etree.SubElement(root, "segmented").text = "0"
for i in range(len(bounding_boxes)):
x = bounding_boxes[i][0]
y = bounding_boxes[i][1]
width = bounding_boxes[i][2]
height = bounding_boxes[i][3]
x_min = int(x - width / 2 + .5)
x_max = int(x + width / 2 + .5)
y_min = int(y - height / 2 + .5)
y_max = int(y + height / 2 + .5)
object_node = etree.SubElement(root, "object")
etree.SubElement(object_node, "name").text = "aruco"
etree.SubElement(object_node, "pose").text = "Unspecified"
etree.SubElement(object_node, "truncated").text = "0"
etree.SubElement(object_node, "difficult").text = "0"
bndbox = etree.SubElement(object_node, "bndbox")
etree.SubElement(bndbox, "xmin").text = str(x_min)
etree.SubElement(bndbox, "ymin").text = str(y_min)
etree.SubElement(bndbox, "xmax").text = str(x_max)
etree.SubElement(bndbox, "ymax").text = str(y_max)
corners = etree.SubElement(object_node, "corners")
for j in range(4):
corner = marker_corners[i][j]
etree.SubElement(corners, "x" + str(j)).text = str(corner[0])
etree.SubElement(corners, "y" + str(j)).text = str(corner[1])
etree.SubElement(object_node, "id").text = str(marker_ids[i])
xml_file = open(xml_file_path, "w")
xml_file.write(etree.tostring(etree.ElementTree(root), encoding="unicode", pretty_print=True))
xml_file.close()
def set_input_folder(self, input_dataset_folder):
print("Listing the images in the directory... ")
self.files_list = os.listdir(input_dataset_folder)
print("Done!")
self.input_dataset_folder = input_dataset_folder + "/"
def read_random_photo(self):
file_index = int(np.random.rand() * len(self.files_list))
image = cv.imread(self.input_dataset_folder + self.files_list[file_index], cv.IMREAD_COLOR)
height, width, _ = image.shape
if height > width:
image = cv.rotate(image, cv.ROTATE_90_CLOCKWISE)
height, width, _ = image.shape
scale = max(float(self.im_width) / width, float(self.im_height) / height)
if scale > 1:
image = cv.resize(image, (0, 0), fx=scale, fy=scale)
height, width, _ = image.shape
x_positions = width - self.im_width
y_positions = height - self.im_height
x = 0
y = 0
if x_positions > 0:
x = int(np.random.rand() * x_positions)
if y_positions > 0:
y = int(np.random.rand() * y_positions)
# crop the image to the right size and with the random shift
image = image[y:self.im_height + y, x:self.im_width + x]
# flip horizontally randomly
if np.random.rand() < self.horizontal_flip_prob:
image = cv.flip(image, 1)
# # flip vertically randomly
# if np.random.rand() < self.vertical_flip_prob:
# image = cv.flip(image, 0)
brightness_modifier = get_rand_val([0.1, 1.9])
        image = image.astype(np.float64) * brightness_modifier
image = np.minimum(np.maximum(image, 0), 255).astype(np.uint8)
return image
def get_rand_rot_mat(self):
in_plane_axis = np.array([0, 0, 1.0])
in_plane_rot = get_rand_val([0, 2 * np.pi])
in_plane_mat, _ = cv.Rodrigues(in_plane_axis * in_plane_rot)
oop_rot = get_rand_val([0, self.max_oop_rot])
# debug
# oop_rot = self.max_oop_rot
# debug
oop_axis_orientation = get_rand_val([0, 2 * np.pi])
oop_axis = np.array([np.cos(oop_axis_orientation), np.sin(oop_axis_orientation), 0])
oop_mat, _ = cv.Rodrigues(oop_axis * oop_rot)
rot_mat = np.matmul(oop_mat, in_plane_mat)
return rot_mat
def get_camera_params(self, width_fov):
f = self.im_width / (2 * np.tan(width_fov / 2))
cx = self.im_width / 2
cy = self.im_height / 2
cam_mat = np.eye(3)
cam_mat[1, 1] = f
cam_mat[0, 0] = f
cam_mat[0, 2] = cx
cam_mat[1, 2] = cy
return cam_mat, f, cx, cy
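    # Worked example (illustrative, using the default im_width=640, im_height=360):
    # for a horizontal FOV of 90 deg, f = 640 / (2 * tan(pi / 4)) = 320 px and the
    # principal point is (cx, cy) = (320, 180).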
def get_rand_marker(self, polygon, binary_square=False):
marker_pixels = self.im_height
mborder_size = int(marker_pixels / 8)
mimage_size = marker_pixels + 2 * mborder_size
marker_image = []
marker_id = 0
if binary_square:
bs_image = (np.random.rand(8, 8) * 2).astype(np.uint8) * 255
# make sure it is not aruco
x = int(np.random.rand() * 8)
y = int(np.random.rand() * 8)
i = int(np.random.rand() * 2)
if x != 0 and x != 7 and i == 0:
y = int(y / 4) * 7
if y != 0 and y != 7 and i == 1:
x = int(x / 4) * 7
bs_image[y, x] = 255
marker_image = cv.resize(bs_image, (marker_pixels, marker_pixels), interpolation=cv.INTER_NEAREST)
else:
if self.id_perm_index == 0:
self.ids_permutation = np.random.permutation(250)
marker_id = self.ids_permutation[self.id_perm_index] # int(np.random.rand() * 250)
self.id_perm_index = (self.id_perm_index + 1) % 250
bs_image = np.zeros((8, 8), np.uint8)
code_mat = np.zeros((6, 6), np.uint8)
matrix_in_dict = True
marker_code = MIP36h12[marker_id]
for i in reversed(range(6)):
for j in reversed(range(6)):
code_mat[i][j] = marker_code % 2
marker_code = int(marker_code / 2)
bs_image[1:7, 1:7] = code_mat * 255
marker_image = cv.resize(bs_image, (marker_pixels, marker_pixels), interpolation=cv.INTER_NEAREST)
marker_image = cv.copyMakeBorder(marker_image, mborder_size, mborder_size, mborder_size, mborder_size,
cv.BORDER_CONSTANT, value=(255, 255, 255))
# increase the black value and decrease the white value to be able to work with different illuminations
n = 50.0
cn = float(int(1 + np.random.rand() * (n - 2))) # contrast numerator
white_val = 255 / (cn / n + 1)
black_val = white_val * (cn / n)
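        # worked example of the contrast remapping above: with n = 50 and cn = 25,
        # white_val = 255 / 1.5 = 170 and black_val = 170 * 0.5 = 85, so the marker keeps
        # a mid-range contrast instead of pure black/white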
marker_image = marker_image * (white_val - black_val) / 255.0 + np.ones((mimage_size, mimage_size)) * black_val
# apply illumination
illum_generator = np.random.rand() * 2 - 1
if illum_generator >= 0:
illumination = 255 / white_val * illum_generator
else:
illumination = pow(white_val - black_val, illum_generator)
add_color = np.random.rand() > 0.5
if add_color:
illum_color = (np.random.rand(1, 1, 3) * 2 + 1) / 3
illum_color /= illum_color[0, 0, 0] * 0.114 + illum_color[0, 0, 1] * 0.587 + illum_color[0, 0, 2] * 0.229
illumination *= illum_color
# print("White val: " + str(white_val))
# print("black val: " + str(black_val))
#
# print(
# "White Range: [" + str(white_val / (white_val - black_val)) + "," + str(250 * white_val / black_val) + "]")
# print(
# "Black Range: [" + str(black_val / (white_val - black_val)) + "," + str(250 * black_val / black_val) + "]")
add_shadow = np.random.rand() > 0.5
illum_map = []
if add_shadow:
illum_map_small = np.minimum(np.rint(np.random.rand(3, 3)) + np.random.rand() * 2.0 / 3 + 1.0 / 3, 1.0)
illum_map_big = cv.resize(illum_map_small, (mimage_size, mimage_size), interpolation=cv.INTER_CUBIC)
illum_map = np.empty((mimage_size, mimage_size, 1))
illum_map[..., 0] = illum_map_big
marker_color_image = np.empty((mimage_size, mimage_size, 3))
for i in range(3):
marker_color_image[..., i] = marker_image
# random color change
# rand_color_range = 3
# rand_color_change = np.random.rand(1, 1, 3) * rand_color_range
# form the final image
if add_shadow:
marker_image = np.rint(marker_color_image * (illum_map * illumination))
else:
            marker_image = np.rint(marker_color_image * illumination)
# Written by <NAME>, Seoul National University (<EMAIL>)
import os
import numpy as np
from collections import OrderedDict
import torch
import torch.nn.functional as F
from src.evaluate import compute_metrics
from src.utils import Print
class Trainer():
""" train / eval helper class """
def __init__(self, model):
self.model = model
self.optim = None
self.scheduler = None
self.class_weight = None
# initialize logging parameters
self.train_flag = False
self.epoch = 0.0
self.best_loss = None
self.logger_train = Logger()
self.logger_eval = Logger()
def train(self, batch, device):
# training of the model
batch = set_device(batch, device)
self.model.train()
self.optim.zero_grad()
inputs, labels = batch
outputs = self.model(inputs)
loss = get_loss(outputs, labels, self.class_weight)
loss.backward()
self.optim.step()
# logging
self.logger_train.update(len(outputs), loss.item())
self.logger_train.keep(F.softmax(outputs, dim=1), labels)
def evaluate(self, batch, device):
# evaluation of the model
batch = set_device(batch, device)
self.model.eval()
with torch.no_grad():
inputs, labels = batch
outputs = self.model(inputs)
loss = get_loss(outputs, labels, self.class_weight).item()
# logging
self.logger_eval.update(len(outputs), loss)
self.logger_eval.keep(F.softmax(outputs, dim=1), labels)
def scheduler_step(self):
# scheduler_step
self.scheduler.step()
def save_model(self, save_prefix):
        # save state_dict to checkpoint
if save_prefix is None: return
elif not os.path.exists(save_prefix + "/checkpoints/"):
os.makedirs(save_prefix + "/checkpoints/", exist_ok=True)
state = self.model.state_dict()
torch.save(state, save_prefix + "/checkpoints/%d.pt" % self.epoch)
def save_outputs(self, save_prefix):
        # save evaluation labels and model outputs as npy files
if save_prefix is None: return
self.logger_eval.evaluate(train=False)
np.save(save_prefix + "/Y.npy", self.logger_eval.labels)
        np.save(save_prefix + "/P.npy", self.logger_eval.outputs)
__author__ = '<NAME>'
__project__ = 'noise vs resolution'
"""Copyright 2020. All rights reserved.
This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable
for any damages arising from the use of this software.
Permission is granted to anyone to use this software within the scope of evaluating multi-analyte sensing. No permission
is granted to use the software for commercial applications, or to alter or redistribute it.
This notice may not be removed or altered from any distribution.
"""
import matplotlib
import matplotlib.pylab as plt
from matplotlib.path import Path
import matplotlib.patches as patches
from mpl_toolkits.mplot3d import Axes3D
from shapely.geometry import LineString
import cv2
import math
import multiprocessing as mp
import seaborn as sns
import pandas as pd
import numpy as np
import random
from lmfit import Model
import scipy.signal
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter
from mcerp import *
from uncertainties import *
from uncertainties import unumpy
import h5py
import os
from glob import glob
from PIL import Image
import datetime
# global variables
sns.set(style="darkgrid")
sns.set_context('paper')
col = ['#14274e', '#f6830f', '#bb2205']
mark = ['o', 'd']
fs = 13
depth_lim = dict({'optode1': (-5, 4), 'optode2': (-5, 4)})
# =====================================================================================
def prep_plotting_avSD(error, dfoptode, uncer_op1, uncer_op2):
if error == 'SD' or error == 'sd':
df_error = [pd.DataFrame([[i.s for i in dfoptode[en][se]] for se in dfoptode[en].columns],
columns=dfoptode[en].index, index=dfoptode[en].columns).T
for en in range(len(dfoptode))]
else:
df_error = [uncer_op1['sem'], uncer_op2['sem']]
return df_error
def prepPlot_optodeSet(o, s, error, dfoptode, uncer_op1, uncer_op2):
if '1' in o:
dfop, optode_sem, interpol = dfoptode[0], uncer_op1['sem'], uncer_op1['SD_interpol'][s]
optode_sd = pd.DataFrame([[i.s for i in dfop[se]] for se in dfop.columns], index=dfop.columns,
columns=dfop.index).T
else:
dfop, optode_sem, interpol = dfoptode[1], uncer_op2['sem'], uncer_op2['SD_interpol'][s]
optode_sd = pd.DataFrame([[i.s for i in dfop[se]] for se in dfop.columns], index=dfop.columns,
columns=dfop.index).T
if error == 'SD':
dferr = optode_sd
else:
dferr = optode_sem
return dfop, interpol, dferr
def prepPlot_SVerrprop(error, dop1_value, dop2_value, op1_normSEM, op2_normSEM):
if error == 'SD' or error == 'sd':
derror1 = dict(map(lambda s: (s, dop1_value[s][['O2 SD', 'iratio SD']]), dop1_value.keys()))
derror2 = dict(map(lambda s: (s, dop2_value[s][['O2 SD', 'iratio SD']]), dop2_value.keys()))
else:
derror1, derror2 = op1_normSEM, op2_normSEM
for s in derror1.keys():
derror1[s].columns, derror2[s].columns = ['O2', 'iratio'], ['O2', 'iratio']
derror = [derror1, derror2]
return derror
def prepPlot_SVerrprop_ex(o, s, error, dop1_value=None, dop1_param=None, op1_normSEM=None, f1inter_mc=None,
dop2_value=None, dop2_param=None, op2_normSEM=None, f2inter_mc=None):
if '1' in o:
ls_df = [dop1_value, dop1_param, op1_normSEM, f1inter_mc]
        if any(i is None for i in ls_df):
            raise ValueError('To plot the example, provide all relevant data! Please check dop_value, dop_param, '
                             'op_normSEM, and finter_mc')
dfop, dop_para, df_SEM, finter_mc = dop1_value[s], dop1_param[s], op1_normSEM[s], f1inter_mc[s]
else:
ls_df = [dop2_value, dop2_param, op2_normSEM, f2inter_mc]
        if any(i is None for i in ls_df):
            raise ValueError('To plot the example, provide all relevant data! Please check dop_value, dop_param, '
                             'op_normSEM, and finter_mc')
dfop, dop_para, df_SEM, finter_mc = dop2_value[s], dop2_param[s], op2_normSEM[s], f2inter_mc[s]
if error == 'SD' or error == 'sd':
dferr = dfop[['O2 SD', 'iratio SD']]
else:
dferr = pd.concat([df_SEM['O2'], pd.DataFrame([i.s for i in df_SEM['iratio']], index=df_SEM.index)], axis=1)
dferr.columns = ['O2', 'iratio']
return dfop, dop_para, df_SEM, dferr, finter_mc
def prepMS_plot(index_lp, dic_micro, offset):
# microsensor preparation
df_micro = dic_micro['run1'].set_index('Intensity (mV)')
df_micro['Depth (mm)'] = df_micro['Depth (µm)'] / 1000 # depth in mm
# microsensor extension to same depth as selected for the optode
df_ms = pd.DataFrame([df_micro['Depth (mm)'].index, df_micro['Depth (mm)']], index=['Intensity', 'Depth (mm)']).T
xnew = np.linspace(1, len(df_ms.index), num=int(len(df_ms.index)))
df_ms.index = xnew
df_ms.loc[0, :] = [df_ms['Intensity'].loc[:3].to_numpy().mean(), index_lp[0] * 1.05]
df_ms = df_ms.sort_index()
df_ms.loc[xnew[-1] + 1, :] = [df_ms['Intensity'].loc[df_ms.shape[0] - 3:].to_numpy().mean(), index_lp[-1] * 1.05]
df_ms = df_ms.sort_index()
df_ms['Depth (mm)'] = [i - offset for i in df_ms['Depth (mm)']]
return df_ms
def sgolay2d(z, window_size, order, derivative=None):
# number of terms in the polynomial expression
n_terms = (order + 1) * (order + 2) / 2.0
if window_size % 2 == 0:
raise ValueError('window_size must be odd')
if window_size**2 < n_terms:
raise ValueError('order is too high for the window size')
half_size = window_size // 2
# exponents of the polynomial: p(x,y) = a0 + a1*x + a2*y + a3*x^2 + a4*y^2 + a5*x*y + ...
    # this gives a list of two-item tuples; each tuple contains the exponents of the k-th term,
    # the first element for x and the second for y
exps = [(k-n, n) for k in range(order+1) for n in range(k+1)]
# coordinates of points
ind = np.arange(-half_size, half_size+1, dtype=np.float64)
dx = np.repeat(ind, window_size)
dy = np.tile(ind, [window_size, 1]).reshape(window_size**2, )
# build matrix of system of equation
A = np.empty((window_size**2, len(exps)))
for i, exp in enumerate(exps):
A[:, i] = (dx**exp[0]) * (dy**exp[1])
# pad input array with appropriate values at the four borders
new_shape = z.shape[0] + 2*half_size, z.shape[1] + 2*half_size
Z = np.zeros((new_shape))
# top band
band = z[0, :]
Z[:half_size, half_size:-half_size] = band - np.abs( np.flipud(z[1:half_size+1, :]) - band)
# bottom band
band = z[-1, :]
Z[-half_size:, half_size:-half_size] = band + np.abs(np.flipud(z[-half_size-1:-1, :]) -band)
# left band
band = np.tile(z[:, 0].reshape(-1, 1), [1, half_size])
Z[half_size:-half_size, :half_size] = band - np.abs(np.fliplr(z[:, 1:half_size+1]) - band)
# right band
band = np.tile(z[:, -1].reshape(-1, 1), [1, half_size])
Z[half_size:-half_size, -half_size:] = band + np.abs(np.fliplr(z[:, -half_size-1:-1]) - band)
# central band
Z[half_size:-half_size, half_size:-half_size] = z
# top left corner
band = z[0, 0]
Z[:half_size, :half_size] = band - np.abs(np.flipud(np.fliplr(z[1:half_size+1, 1:half_size+1])) - band)
# bottom right corner
band = z[-1, -1]
Z[-half_size:, -half_size:] = band + np.abs(np.flipud(np.fliplr(z[-half_size-1:-1, -half_size-1:-1])) - band)
# top right corner
band = Z[half_size, -half_size:]
Z[:half_size, -half_size:] = band - np.abs(np.flipud(Z[half_size+1:2*half_size+1, -half_size:]) - band)
# bottom left corner
band = Z[-half_size:, half_size].reshape(-1, 1)
Z[-half_size:, :half_size] = band - np.abs(np.fliplr(Z[-half_size:, half_size+1:2*half_size+1]) - band)
# solve system and convolve
    if derivative is None:
m = np.linalg.pinv(A)[0].reshape((window_size, -1))
return scipy.signal.fftconvolve(Z, m, mode='valid')
elif derivative == 'col':
c = np.linalg.pinv(A)[1].reshape((window_size, -1))
return scipy.signal.fftconvolve(Z, -c, mode='valid')
elif derivative == 'row':
r = np.linalg.pinv(A)[2].reshape((window_size, -1))
return scipy.signal.fftconvolve(Z, -r, mode='valid')
elif derivative == 'both':
c = np.linalg.pinv(A)[1].reshape((window_size, -1))
r = np.linalg.pinv(A)[2].reshape((window_size, -1))
return scipy.signal.fftconvolve(Z, -r, mode='valid'), scipy.signal.fftconvolve(Z, -c, mode='valid')
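# Usage sketch (illustrative, not part of the original script):
#
#   >>> import numpy as np
#   >>> z = np.random.rand(64, 64)                          # noisy 2D map
#   >>> z_smooth = sgolay2d(z, window_size=11, order=3)     # smoothed surface, same shape
#   >>> dz_row, dz_col = sgolay2d(z, window_size=11, order=3, derivative='both')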
# ------------------------------------------------------------------------
def plot_optode_avSD_v1(conc, dfoptode, error, col, mark, fs, RoI_op):
fig2, ax2 = plt.subplots(figsize=(8, 8), nrows=3, ncols=len(RoI_op), sharex=True, sharey=True, frameon=False)
if len(RoI_op) == 1:
ax2[0].set_title('Optode with different settings', fontsize=fs * 0.9)
else:
for n in range(len(RoI_op)):
ax2[0][n].set_title('Optode-' + str(n+1), fontsize=fs*0.9)
# plotting part
ls_handels = list()
if len(RoI_op) == 1:
for en in range(len(dfoptode[0].columns)):
l = ax2[en].errorbar(conc, [i.n for i in dfoptode[0][en]], error[0][en].values, linestyle='None',
marker=mark[0], fillstyle='none', color=col[en], ms=6, capsize=6, label=en)
ls_handels.append(l)
else:
for o in range(len(RoI_op)):
for en, s in enumerate(dfoptode[o].columns):
l = ax2[en][o].errorbar(conc, [i.n for i in dfoptode[o][s]], error[o][s].values, linestyle='None',
marker=mark[0], fillstyle='none', color=col[en], ms=6, capsize=6,
label=s.split('t')[0] + 'tting ' + s.split('t')[-1])
if o == 1:
ls_handels.append(l)
# legend and axis layout / labelling
if len(RoI_op) == 1:
ax2[1].legend(handles=ls_handels, loc="upper left", bbox_to_anchor=[1, 0.9], shadow=True, fancybox=True)
else:
ax2[1][len(RoI_op)-1].legend(handles=ls_handels, loc="upper left", bbox_to_anchor=[1, 0.9], shadow=True,
fancybox=True)
if len(RoI_op) == 1:
ax2[0].tick_params(axis='both', which='both', direction='out', labelsize=fs * 0.8) # top row
ax2[1].tick_params(axis='both', which='both', direction='out', labelsize=fs * 0.8) # middle row
ax2[2].tick_params(axis='both', which='both', direction='out', labelsize=fs * 0.8) # bottom row
else:
for o in range(len(RoI_op)):
ax2[0][o].tick_params(axis='both', which='both', direction='out', labelsize=fs * 0.8) # top row
ax2[1][o].tick_params(axis='both', which='both', direction='out', labelsize=fs * 0.8) # middle row
ax2[2][o].tick_params(axis='both', which='both', direction='out', labelsize=fs * 0.8) # bottom row
# x,y label position
fig2.text(0.5, 0.075, 'O$_2$ concentration [%air]', va='center', ha='center', fontsize=fs * 1.2)
fig2.text(0.025, 0.55, 'Ratio $R/G$', va='center', ha='center', rotation='vertical', fontsize=fs * 1.2)
plt.subplots_adjust(left=0.1, bottom=0.15, right=0.85, top=0.95)
plt.show()
return fig2, ax2
def plot_optode_set(o, s, conc, xinter, dfop, interpol, optode_sem, fs=11):
fig2, ax2 = plt.subplots(figsize=(5, 3), frameon=False)
ax2.set_title(o, fontsize=fs*0.9)
# plotting part
ax2.errorbar(conc, [i.n for i in dfop[s]], optode_sem[s].values, linestyle='None', marker=mark[int(s[-1])-1],
fillstyle='none', color=col[int(s[-1])-1], ms=6, capsize=5, label=s)
ax2.fill_between(x=xinter, y1=interpol[0](xinter), y2=interpol[1](xinter), color=col[int(s[-1])-1], alpha=0.2, lw=0)
# legend and axis layout / labelling
ax2.legend(loc="upper left", bbox_to_anchor=[1, 0.9], shadow=True, fancybox=True)
ax2.tick_params(axis='both', which='both', direction='out', labelsize=fs*0.8)
# x,y label position
ax2.set_xlabel('O$_2$ concentration [%air]', va='center', ha='center', fontsize=fs*0.9)
ax2.set_ylabel('Ratio $R/G$', va='center', ha='center', rotation='vertical', fontsize=fs*0.9)
plt.tight_layout()
plt.show()
return fig2, ax2
def plot_SVerrorprop(dop1_value, dop1_param, derror, f1inter_mc, RoI1_av, RoI2_av=None, dop2_value=None,
dop2_param=None, f2inter_mc=None, fs=11.):
n = 1
if RoI2_av:
n += 1
ls = [dop2_value, dop2_param, f2inter_mc]
        if any(i is None for i in ls):
raise ValueError('To plot both optodes, all data are required! Please check dop_value, dop_param, '
'and finter_mc')
# -----------------------------------------------------------------------------------------
fig2, ax2 = plt.subplots(figsize=(8, 8), nrows=3, ncols=n, sharex=True, sharey=True, frameon=False)
if n == 1:
ax2[0].set_title('Optode 1', fontsize=fs*0.9)
else:
ax2[0][0].set_title('Optode 1', fontsize=fs*0.9), ax2[0][1].set_title('Optode 2', fontsize=fs*0.9)
num = int(100/0.5 + 1)
xnew = np.linspace(0, 100, num=num)
ls_handels = list()
if RoI1_av:
for en, s in enumerate(dop1_value.keys()):
name = s.split('t')[0] + 'tting ' + s.split('t')[-1]
O2new = np.linspace(dop1_value[s]['O2 mean'].loc[0], dop1_value[s]['O2 mean'].loc[100], num=num)
ynew = _simplifiedSV(xnew, k=dop1_param[s]['k'].mean, f=dop1_param[s]['f'].mean)
ydata = f1inter_mc[s]
# dashed line for bestFit
if n == 1:
ax2[en].plot(xnew, ynew, ls='-.', lw=1., color=col[en], label='bestFit')
l = ax2[en].errorbar(dop1_value[s]['O2 mean'], dop1_value[s]['iratio mean'].values, capsize=6, ms=6,
xerr=derror[0][s]['O2'].values, linestyle='None', marker=mark[0], color=col[en],
yerr=derror[0][s]['iratio'].values, fillstyle='none', label=name)
ax2[en].fill_between(x=O2new, y1=ydata[0](O2new), y2=ydata[1](O2new), color=col[en], alpha=0.2, lw=0)
ls_handels.append(l)
else:
ax2[en][0].plot(xnew, ynew, ls='-.', lw=1., color=col[en], label='bestFit')
l = ax2[en][0].errorbar(dop1_value[s]['O2 mean'], dop1_value[s]['iratio mean'].values, capsize=6, ms=6,
xerr=derror[0][s]['O2'].values, linestyle='None', marker=mark[0], color=col[en],
fillstyle='none', label=name)
ax2[en][0].fill_between(x=O2new, y1=ydata[0](O2new), y2=ydata[1](O2new), color=col[en], lw=0, alpha=0.2)
ls_handels.append(l)
ls_handels = list()
if RoI2_av:
for en, s in enumerate(dop2_value.keys()):
name = s.split('t')[0] + 'tting ' + s.split('t')[-1]
O2new = np.linspace(dop2_value[s]['O2 mean'].loc[0], dop2_value[s]['O2 mean'].loc[100], num=num)
ynew = _simplifiedSV(xnew, k=dop2_param[s]['k'].mean, f=dop2_param[s]['f'].mean)
ydata = f2inter_mc[s]
# dashed line for bestFit
if n == 1:
ax2[en].plot(xnew, ynew, ls='-.', lw=1., color=col[en], label='bestFit')
l = ax2[en].errorbar(dop2_value[s]['O2 mean'], dop2_value[s]['iratio mean'].values, capsize=6, ms=6,
xerr=derror[1][s]['O2'].values, linestyle='None', marker=mark[0], color=col[en],
fillstyle='none', label=name)
ax2[en].fill_between(x=O2new, y1=ydata[0](O2new), color=col[en], alpha=0.2, lw=0, y2=ydata[1](O2new))
ls_handels.append(l)
else:
ax2[en][1].plot(xnew, ynew, ls='-.', lw=1., color=col[en], label='bestFit')
l = ax2[en][1].errorbar(dop2_value[s]['O2 mean'], dop2_value[s]['iratio mean'].values, capsize=6, ms=6,
xerr=derror[1][s]['O2'].values, linestyle='None', marker=mark[0], color=col[en],
fillstyle='none', label=name)
ax2[en][1].fill_between(x=O2new, y1=ydata[0](O2new), color=col[en], lw=0, alpha=0.2, y2=ydata[1](O2new))
ls_handels.append(l)
# legend and axis layout / labelling
if n == 1:
ax2[1].legend(handles=ls_handels, loc="upper left", bbox_to_anchor=[1, 0.9], shadow=True, fancybox=True)
else:
ax2[1][1].legend(handles=ls_handels, loc="upper left", bbox_to_anchor=[1, 0.9], shadow=True, fancybox=True)
# x,y label position
fig2.text(0.5, 0.018, 'O$_2$ concentration [%air]', va='center', ha='center', fontsize=fs*1.2)
fig2.text(0.025, 0.55, 'Ratio $R/G$', va='center', ha='center', rotation='vertical', fontsize=fs*1.2)
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.85, top=0.95)
plt.show()
return fig2, ax2
def plot_optode_set_SV(o, s, en, dfop, dop_para, dferr, finter_mc, fs=11):
fig2, ax2 = plt.subplots(figsize=(5, 3), frameon=False)
title = o + ' - ' + s
ax2.set_title(title, loc='left', fontsize=fs * 0.9)
xnew = np.linspace(0, 100, num=int(100 / 0.5 + 1))
O2new = np.linspace(dfop['O2 mean'].loc[0], dfop['O2 mean'].loc[100], num=int(100 / 0.5 + 1))
ynew = _simplifiedSV(xnew, k=dop_para['k'].mean, f=dop_para['f'].mean)
ax2.plot(xnew, ynew, ls='-.', lw=1., color=col[en - 1], label='bestFit')
ax2.errorbar(dfop['O2 mean'], dfop['iratio mean'].values, capsize=6, xerr=dferr['O2'].values,color=col[en - 1],
linestyle='None', marker=mark[0], fillstyle='none', ms=6, label=s)
ax2.fill_between(x=O2new, y1=finter_mc[0](O2new), y2=finter_mc[1](O2new), color=col[en - 1], alpha=0.2, lw=0)
# x,y label position
fig2.text(0.5, 0.04, 'O$_2$ concentration [%air]', va='center', ha='center', fontsize=fs)
fig2.text(0.025, 0.55, 'Ratio $R/G$', va='center', ha='center', rotation='vertical', fontsize=fs)
plt.subplots_adjust(left=0.1, bottom=0.2, right=0.95, top=0.9)
plt.show()
return fig2, ax2
def plot_wholeImage3D(dO2_mean, unit, pad=2):
xx, yy = np.meshgrid(dO2_mean.index.to_numpy(), dO2_mean.columns.to_numpy())
# 3D image of full area
fig = plt.figure(figsize=(10, 8))
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection='3d') was removed in newer matplotlib versions
surf = ax.plot_surface(xx, yy, dO2_mean.T.fillna(limit=5, method='ffill'), cmap='magma_r', linewidth=0, vmin=0,
vmax=100, antialiased=False, rstride=5, cstride=10)
cbar = fig.colorbar(surf, aspect=20, shrink=0.8)
ax.view_init(16, 45)
ax.tick_params(axis='x', labelsize=fs*0.9)
ax.tick_params(axis='y', labelsize=fs*0.9)
ax.tick_params(axis='z', labelsize=fs*0.9)
cbar.ax.tick_params(labelsize=fs*0.8)
ax.set_xlabel('Image height [{}]'.format(unit), fontsize=fs, labelpad=pad)
ax.set_ylabel('Image width [{}]'.format(unit), fontsize=fs, labelpad=pad)
ax.set_zlabel('$O_2$ concentration [%air]', fontsize=fs, labelpad=pad)
plt.tight_layout()
plt.draw()
return fig, ax
def plot_optode2D(o, s, px2mm, surface, dO2_av, depth_range, width_range, figsize=(6, 2), unit='mm', fs=11, vmin=None,
vmax=None):
# prepare optode for plotting; baseline correction and cropping the image to the depth and width of interest
df_data = optodePrep2D(o=o, s=s, px2mm=px2mm, baseline=surface, dO2_av=dO2_av, depth_range=depth_range,
width_range=width_range)
# resetting the axis ticks with extent
extent = [df_data.columns[0], df_data.columns[-1], # x-axis, e.g. columns
df_data.index[0], df_data.index[-1]] # y-axis, e.g. index
# plotting
fig, ax = plt.subplots(figsize=figsize)
sur = ax.imshow(df_data, extent=extent, cmap='magma_r', vmin=vmin, vmax=vmax)
if vmin is None:
vmin = int(df_data.min().min())
if vmax is None:
vmax = int(df_data.max().max())
plt.colorbar(sur, shrink=0.75, fraction=0.1, aspect=10, ticks=np.linspace(vmin, vmax, num=5))
ax.set_xlabel('Image width [{}]'.format(unit), fontsize=fs)
ax.set_ylabel('Image height [{}]'.format(unit), fontsize=fs)
plt.tight_layout()
return fig, ax
def plotLP(dO2_lp, df_ms, header_ms, depth, kshape, depth_lp, s, arg, dO2_optode=None):
# additional information
col_ = int(s[-1])-1
# figure creation
fig_lp = plt.figure(figsize=(arg['figsize']), dpi=100)
with plt.style.context('seaborn-darkgrid'):
ax1 = fig_lp.add_subplot(131)
ax2 = fig_lp.add_subplot(132, sharex=ax1, sharey=ax1)
ax3 = fig_lp.add_subplot(133, sharex=ax1, sharey=ax1)
if dO2_optode:
with plt.style.context('classic'):
ax11 = fig_lp.add_axes([0.13, 0.2, 0.2, 0.2])
ax21 = fig_lp.add_axes([0.44, 0.2, 0.2, 0.2])
if len(dO2_lp[kshape]['square'].keys()) != 0:
ax31 = fig_lp.add_axes([0.75, 0.2, 0.2, 0.2])
ax1.set_title('(A) Horizontal blur', fontsize=fs, loc='left')
ax2.set_title('(B) Vertical blur', fontsize=fs, loc='left')
if len(dO2_lp[kshape]['square'].keys()) != 0:
ax3.set_title('(C) Square blur', fontsize=fs, loc='left')
# plot line profile
# horizontal
df_h = dO2_lp[kshape]['horizontal'][arg['lw']].fillna(limit=5, method='ffill').loc[depth_lp[0]: depth_lp[1]]
ax1.plot(df_h['mean'].values, df_h.index, lw=arg['curve lw'], color=col[col_])
ax1.fill_betweenx(df_h.index, df_h['mean'].values - df_h['SD'].values, df_h['mean'].values + df_h['SD'].values,
facecolor=col[col_], alpha=0.25)
# vertical
df_v = dO2_lp[kshape]['vertical'][arg['lw']].fillna(limit=5, method='ffill').loc[depth_lp[0]: depth_lp[1]]
ax2.plot(df_v['mean'].values, df_v.index, lw=arg['curve lw'], color=col[col_])
ax2.fill_betweenx(df_v.index, df_v['mean'].values - df_v['SD'].values, df_v['mean'].values + df_v['SD'].values,
facecolor=col[col_], alpha=0.25)
# squared
if len(dO2_lp[kshape]['square'].keys()) == 0:
ax3.axis('off')
else:
df_s = dO2_lp[kshape]['square'][arg['lw']].fillna(limit=5, method='ffill').loc[depth_lp[0]: depth_lp[1]]
ax3.plot(df_s['mean'].values, df_s.index, lw=arg['curve lw'], color=col[col_])
ax3.fill_betweenx(df_s.index, df_s['mean'].values - df_s['SD'].values, df_s['mean'].values + df_s['SD'].values,
facecolor=col[col_], alpha=0.25)
# ..........................................
# 2D imshow
if dO2_optode:
opt_h = dO2_optode[kshape]['horizontal']
extent = [opt_h.columns[0], opt_h.columns[-1], # x-axis, e.g. columns
opt_h.index[-1], opt_h.index[0]] # y-axis, e.g. index
op1 = ax11.imshow(opt_h, extent=extent, aspect=arg['aspect'], cmap=arg['cmap'], vmin=arg['vmin op'],
vmax=arg['vmax op'])
op2 = ax21.imshow(dO2_optode[kshape]['vertical'], extent=extent, aspect=arg['aspect'], cmap=arg['cmap'],
vmin=arg['vmin op'], vmax=arg['vmax op'])
if len(dO2_lp[kshape]['square'].keys()) != 0:
op3 = ax31.imshow(dO2_optode[kshape]['square'], extent=extent, aspect=arg['aspect'], cmap=arg['cmap'],
vmin=arg['vmin op'], vmax=arg['vmax op'])
# color bar
fig_lp.colorbar(op1, aspect=10, shrink=0.8, ax=ax11)
fig_lp.colorbar(op2, aspect=10, shrink=0.8, ax=ax21)
if len(dO2_lp[kshape]['square'].keys()) != 0:
fig_lp.colorbar(op3, aspect=10, shrink=0.8, ax=ax31)
# ..........................................
# microsensor
ax1.plot(df_ms[header_ms[1]].to_numpy(), depth, lw=arg['curve lw'], color='black', label='microsensor')
ax2.plot(df_ms[header_ms[1]].to_numpy(), depth, lw=arg['curve lw'], color='black', label='microsensor')
if len(dO2_lp[kshape]['square'].keys()) != 0:
ax3.plot(df_ms[header_ms[1]].to_numpy(), depth, lw=arg['curve lw'], color='black', label='microsensor')
# ..........................................
# adjust axes
ax1.set_xlim(arg['vmin'], arg['vmax'])
ax1.set_ylim(df_h.index[-1] * 1.05, df_h.index[0] * 1.05)
ax1.tick_params(labelsize=arg['fontsize']*0.9)
ax2.tick_params(labelsize=arg['fontsize']*0.9)
if len(dO2_lp[kshape]['square'].keys()) != 0:
ax3.tick_params(labelsize=arg['fontsize']*0.9)
if dO2_optode:
ax11.tick_params(labelsize=arg['fontsize']*0.7)
ax21.tick_params(labelsize=arg['fontsize']*0.7)
if len(dO2_lp[kshape]['square'].keys()) != 0:
ax31.tick_params(labelsize=arg['fontsize']*0.7)
ax11.set_xlabel('Width [mm]', fontsize=arg['fontsize']*0.7)
ax11.set_ylabel('Height [mm]', fontsize=arg['fontsize']*0.7)
ax21.set_xlabel('Width [mm]', fontsize=arg['fontsize']*0.7)
ax21.set_ylabel('Height [mm]', fontsize=arg['fontsize']*0.7)
if len(dO2_lp[kshape]['square'].keys()) != 0:
ax31.set_xlabel('Width [mm]', fontsize=arg['fontsize']*0.7)
ax31.set_ylabel('Height [mm]', fontsize=arg['fontsize']*0.7)
fig_lp.text(0.4, 0.02, '$O_2$ concentration [%air]', fontsize=arg['fontsize'])
fig_lp.text(0.01, 0.48, 'Depth [mm]', fontsize=arg['fontsize'], rotation='vertical')
fig_lp.subplots_adjust(bottom=0.12, right=0.95, top=0.95, left=0.05, wspace=0.2, hspace=0.2)
return fig_lp
def plot_penetrationDepth(depth, ls_kernel, arg):
if isinstance(ls_kernel[0], tuple):
kernel_s = [k[1] for k in ls_kernel]
else:
kernel_s = ls_kernel
# .....................
fig, ax = plt.subplots(figsize=(5, 3.5))
for en, c in enumerate(depth.columns):
ax.plot(kernel_s, depth[c], lw=1., ls='-.', marker=arg['marker'][en], ms=7,
color=arg['colors'][en], fillstyle='none', label=c.split('-')[0] + ' blur')
ax.legend(loc=0, frameon=True, fancybox=True, fontsize=fs * 0.8)
ax.tick_params(axis='both', labelsize=fs * 0.8)
ax.set_xlabel('kernel size', fontsize=fs)
ax.set_ylabel('$O_2$ penetration depth [mm]', fontsize=fs)
plt.tight_layout()
return fig
# =====================================================================================
def crop_optode(dratio, RoI1, RoI2):
# optode 1
    if RoI1 is None:
optode1 = None
else:
optode1 = dict()
for en, c in enumerate(dratio.keys()):
ls_av = list()
for av in range(len(RoI1)):
height = RoI1[av][1][1] - RoI1[av][0][1]
im_ratio = dratio[c][0][RoI1[av][0][1]:RoI1[av][1][1] + 1]
ls_av.append(np.stack([im_ratio[n][RoI1[av][0][0]:RoI1[av][2][1] + 1] for n in np.arange(height + 1)],
axis=0))
optode1[c] = ls_av
# -------------------------------------------------------------------------
# optode 2
    if RoI2 is None:
optode2 = None
else:
optode2 = dict()
for en, c in enumerate(dratio.keys()):
ls_av = list()
for av in range(len(RoI2)):
height2 = RoI2[av][1][1] - RoI2[av][0][1]
im_ratio2 = dratio[c][1][RoI2[av][0][1]:RoI2[av][1][1] + 1]
ls_av.append(np.stack([im_ratio2[n][RoI2[av][0][0]:RoI2[av][2][1] + 1] for n in np.arange(height2 + 1)],
axis=0))
optode2[c] = ls_av
return optode1, optode2
def image_resolution(px, dist_mm, inch=None):
px2mm = px / dist_mm * 1
if inch:
dpi = px / inch
else:
dpi = None
return px2mm, dpi
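# Illustrative usage sketch (not part of the original analysis; the numbers are hypothetical):
# a ruler of 50 mm that spans 1000 px in the image and 4 inch on the print gives the scale below.
def _example_image_resolution():
    px2mm, dpi = image_resolution(px=1000, dist_mm=50, inch=4)
    # px2mm = 1000 / 50 = 20 px per mm; dpi = 1000 / 4 = 250
    return px2mm, dpi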
def px2mm_conversion(df, px2mm, surface):
ind_new = df.index.to_numpy() / px2mm - surface
col_new = df.columns.to_numpy() / px2mm
df.index, df.columns = ind_new, col_new
return df
def round_decimals_up(number, decimals=2):
"""
Returns a value rounded up to a specific number of decimal places.
"""
if not isinstance(decimals, int):
raise TypeError("decimal places must be an integer")
elif decimals < 0:
raise ValueError("decimal places has to be 0 or more")
elif decimals == 0:
return math.ceil(number)
factor = 10 ** decimals
return math.ceil(number * factor) / factor
def round_decimals_down(number, decimals=2):
"""
Returns a value rounded down to a specific number of decimal places.
"""
if not isinstance(decimals, int):
raise TypeError("decimal places must be an integer")
elif decimals < 0:
raise ValueError("decimal places has to be 0 or more")
elif decimals == 0:
return math.floor(number)
factor = 10 ** decimals
return math.floor(number * factor) / factor
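# Illustrative usage sketch (not part of the original module): rounding away from / towards zero
# at a fixed number of decimals, e.g. for clean axis limits.
def _example_rounding_helpers():
    upper = round_decimals_up(3.14159, decimals=2)    # -> 3.15
    lower = round_decimals_down(3.14159, decimals=2)  # -> 3.14
    return upper, lower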
# =====================================================================================
def averaging_areas(doptode_set):
l = dict(map(lambda c: (c, pd.DataFrame([(np.mean(doptode_set[c][av]), np.std(doptode_set[c][av]))
for av in range(len(doptode_set[c]))], columns=['mean', 'SD_area'])),
doptode_set.keys()))
dfop_set = pd.concat(l, axis=0).sort_index(axis=0)
return dfop_set
def averaging_deriv(ls_x):
der = 1/len(ls_x) * ls_x
return der
def optode_normalization(dfop):
optode_norm = dict(map(lambda s: (s, dfop[s].loc[0] / dfop[s]), dfop.keys()))
# update calibration point zero
for s in optode_norm.keys():
sd = ((dfop[s][0].std_dev / dfop[s][0].n)**2)*2
optode_norm[s].loc[0] = ufloat(dfop[s][0].n / dfop[s][0].n, np.sqrt(sd) * dfop[s][0].n / dfop[s][0].n)
return optode_norm
def interpolation_SD(conc, dfop, s, method='cubic'):
f_min = interp1d(conc, [i.n-i.s for i in dfop[s]], kind=method)
f_max = interp1d(conc, [i.n+i.s for i in dfop[s]], kind=method)
return f_min, f_max
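# Illustrative sketch (not part of the original calibration; values are made up): build the lower
# and upper interpolation functions from a column of ufloats (mean +/- SD). Assumes that ufloat
# (uncertainties) and interp1d (scipy) are imported at the top of this module, as they are used
# by the surrounding code.
def _example_interpolation_SD():
    conc = [0, 25, 50, 75, 100]
    dfop = pd.DataFrame({'set1': [ufloat(1.0, 0.02), ufloat(0.8, 0.02), ufloat(0.65, 0.03),
                                  ufloat(0.55, 0.03), ufloat(0.5, 0.04)]}, index=conc)
    f_min, f_max = interpolation_SD(conc=conc, dfop=dfop, s='set1', method='cubic')
    # the band between f_min(x) and f_max(x) is what fill_between draws in the plotting functions
    return f_min(40), f_max(40)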
def interpolation_SDmc(df, s, method='cubic'):
I_min = interp1d(df[s]['O2 mean'].values, (df[s]['iratio mean'] - df[s]['iratio SD']).values, kind=method)
I_max = interp1d(df[s]['O2 mean'].values, (df[s]['iratio mean'] + df[s]['iratio SD']).values, kind=method)
return I_min, I_max
def channel_division(dconc, dch_num, dch_denom, s):
dratio = dict()
for c in dconc[s]:
dratio[c] = [dch_num[s][str(c)+'%'][n] / dch_denom[s][str(c)+'%'][n] for n in range(len(dch_num[s][str(c)+'%']))]
return dratio
def ratiometric_intensity(path, crop_op, channel, RoI1=None, RoI2=None):
# RoI are areas defined anti-clockwise starting from the top left corner with P(col / ind)
if crop_op:
pass
else:
RoI_op_ = RoI1 + RoI2
crop_op = [[(RoI_op_[o][p][0] + 5, RoI_op_[o][p][1] + 5) for p in range(len(RoI_op_[o]))]
for o in range(len(RoI_op_))]
# -------------------------------
# RoI for different settings
height = list(map(lambda n: (crop_op[n][1][1] - crop_op[n][0][1]), range(len(crop_op))))
# load all calibration images - collect information
dict_red, dict_green, dict_conc = load_calibration_info(path=path, RoI1=crop_op, height=height, channel=channel,
server=False)
# calculating the ratio R/G of the whole optode
dratio = dict(map(lambda k: (k, channel_division(dconc=dict_conc, dch_num=dict_red, dch_denom=dict_green, s=k)),
dict_red.keys()))
# combine cropped info
optode1 = dict(map(lambda s: (s, crop_optode(dratio[s], RoI1=RoI1, RoI2=RoI2)[0]), dratio.keys()))
optode2 = dict(map(lambda s: (s, crop_optode(dratio[s], RoI1=RoI1, RoI2=RoI2)[1]), dratio.keys()))
# determine number of pixels within the defined RoI = sample size
if RoI1:
npx1 = (RoI1[0][1][1] - RoI1[0][0][1]) * (RoI1[0][2][0] - RoI1[0][0][0])
else:
npx1 = 0
if RoI2:
npx2 = (RoI2[0][1][1] - RoI2[0][0][1]) * (RoI2[0][2][0] - RoI2[0][0][0])
else:
npx2 = 0
# ----------------------------------------------------------
    # the signal averaged within each RoI is used as the start/input signal for the uncertainty propagation;
    # each RoI is averaged for all optodes and settings
if RoI1: # optode 1
dfop1_set1 = averaging_areas(doptode_set=optode1['set1'])
dfop1_set2 = averaging_areas(doptode_set=optode1['set2'])
dfop1_set3 = averaging_areas(doptode_set=optode1['set3'])
conc = dfop1_set1.index.levels[0].to_numpy()
dfop1 = dict({'set1': [ufloat(dfop1_set1.loc[i, 'mean'], dfop1_set1.loc[i, 'SD_area']) for i in dfop1_set1.index],
'set2': [ufloat(dfop1_set2.loc[i, 'mean'], dfop1_set2.loc[i, 'SD_area']) for i in dfop1_set2.index],
'set3': [ufloat(dfop1_set3.loc[i, 'mean'], dfop1_set3.loc[i, 'SD_area']) for i in dfop1_set3.index]})
dfop1 = pd.DataFrame(dfop1, index=conc)
else:
dfop1 = None
if RoI2: # optode 2
dfop2_set1 = averaging_areas(doptode_set=optode2['set1'])
dfop2_set2 = averaging_areas(doptode_set=optode2['set2'])
dfop2_set3 = averaging_areas(doptode_set=optode2['set3'])
conc = dfop2_set1.index.levels[0].to_numpy()
dfop2 = dict({'set1': [ufloat(dfop2_set1.loc[i, 'mean'], dfop2_set1.loc[i, 'SD_area']) for i in dfop2_set1.index],
'set2': [ufloat(dfop2_set2.loc[i, 'mean'], dfop2_set2.loc[i, 'SD_area']) for i in dfop2_set2.index],
'set3': [ufloat(dfop2_set3.loc[i, 'mean'], dfop2_set3.loc[i, 'SD_area']) for i in dfop2_set3.index]})
dfop2 = pd.DataFrame(dfop2, index=conc)
else:
dfop2 = None
# prepare for output
dfoptode = [dfop1, dfop2]
para = dict({'sample size': (npx1, npx2), 'concentration': conc, 'ch1': dict_red, 'ch2': dict_green})
return dfoptode, para
def reduce_dict(name, dint1, dint2=None, nopt=1, option='ratio'):
if option == 'ratio':
dop1 = dict(map(lambda s: (s, np.divide(dint1[s][name][0], dint2[s][name][0])), dint1.keys()))
if nopt > 1:
dop2 = dict(map(lambda s: (s, np.divide(dint1[s][name][1], dint2[s][name][1])), dint1.keys()))
else:
dop2 = None
else:
dop1 = dict(map(lambda s: (s, dint1[s][name][0]), dint1.keys()))
if nopt > 1:
dop2 = dict(map(lambda s: (s, dint1[s][name][1]), dint1.keys()))
else:
dop2 = None
dint = dict({'optode1': dop1, 'optode2': dop2})
return dint
def splitImage(path, RoI_op):
# RoI for different settings
height = dict(map(lambda o: (o, RoI_op[o][1][1] - RoI_op[o][0][1]), range(len(RoI_op))))
dict_red, dict_green = load_files(path, RoI_op, height)
# split into smaller dictionaries
name = list(dict_red['set1'].keys())[0]
dint_red = reduce_dict(name=name, dint1=dict_red, dint2=None, nopt=len(RoI_op), option='single')
dint_green = reduce_dict(name=name, dint1=dict_green, dint2=None, nopt=len(RoI_op), option='single')
dint_ratio = reduce_dict(name=name, dint1=dict_red, dint2=dict_green, nopt=len(RoI_op), option='ratio')
return dint_red, dint_green, dint_ratio
def split2statics(dO2):
# mean value
dic_av = dict(map(lambda o:
(o, dict(map(lambda s:
(s, pd.DataFrame(list(map(lambda j: [i.n if i is not np.nan else i
for i in dO2[o][s][j]], dO2[o][s].columns)),
columns=dO2[o][s].index, index=dO2[o][s].columns).T),
dO2[o].keys()))), dO2.keys()))
# standard error
dic_sd = dict(map(lambda o:
(o, dict(map(lambda s:
(s, pd.DataFrame(list(map(lambda j: [i.s if i is not np.nan else i
for i in dO2[o][s][j]], dO2[o][s].columns)),
columns=dO2[o][s].index, index=dO2[o][s].columns).T),
dO2[o].keys()))), dO2.keys()))
return dic_av, dic_sd
def line_profile_v1(df, lp, lw):
    if df.empty:
df_lp = None
else:
# find closest value in df.columns
        diff_min, diff_max = (lp - lw / 2) - df.columns, (lp + lw / 2) - df.columns
        pos_min, pos_max = None, None  # fallback in case no column matches in the loops below
for en, i in enumerate(diff_min):
if i == min(np.abs(diff_min)):
pos_min = (en, df.columns[en])
for en, i in enumerate(diff_max):
if i == min(np.abs(diff_max)):
pos_max = (en, df.columns[en])
if pos_min:
pass
else:
pos_min = (None, None)
if pos_max:
pass
else:
pos_max = (None, None)
if pos_min == pos_max:
df_lp = pd.DataFrame(df[pos_min[1]])
else:
df_lp = df.loc[:, pos_min[1]:pos_max[1]]
return df_lp
def optodePrep2D(o, s, dO2_av, px2mm, baseline, depth_range=None, width_range=None):
# image preparation and cropping image depth/width
df_ex = dO2_av[o][s]
xnew = df_ex.index
df = df_ex.copy()
df.index = reversed(xnew)
if depth_range is None:
df_ = df
else:
px_range = np.arange(0, len(df.index) + 1, step=1)
px_range_mm = px_range / px2mm - baseline[int(o.split('e')[-1])-1]
crop_px1 = list()
for en, p in enumerate(px_range_mm):
if p.round(1) == depth_range[0]:
crop_px1.append(en)
crop_px2 = list()
for en, p in enumerate(px_range_mm):
if p.round(1) == depth_range[1]:
crop_px2.append(en)
crop_px = int(np.mean(crop_px1)), int(np.mean(crop_px2))
df_ = df.loc[df.index[min(crop_px)]:df.index[max(crop_px)], :]
df_.index = reversed(df_ex.index[min(crop_px):max(crop_px) + 1])
if width_range is None:
df_data = df_
else:
df_data = df_.loc[:, width_range[0]:width_range[1]]
return df_data
def sem_optode(dfop, RoI, conc):
n = np.sqrt(sum([(RoI[i][1][1] - RoI[i][0][1])*(RoI[i][2][0] - RoI[i][0][0]) for i in range(len(RoI))]))
dfop_sem = dict(map(lambda s: (s, [i.s/n for i in dfop[s]]), dfop.keys()))
optode_sem = pd.concat(list(map(lambda s: pd.DataFrame([np.mean(dfop_sem[s][c:(c+1)]) for c in range(len(conc))],
index=conc, columns=[s]), dfop.keys())), axis=1)
return optode_sem, n
def uncertainty(para, RoI1, RoI2, conc, dfop1=None, dfop2=None, method='cubic'):
# interpolation for SD
if isinstance(dfop1, pd.DataFrame):
f_op1 = dict(map(lambda s: (s, interpolation_SD(conc=para['concentration'], dfop=dfop1, s=s, method=method)),
dfop1.columns))
# standard error of the mean
optode1_sem, n1 = sem_optode(dfop=dfop1, RoI=RoI1, conc=conc)
# combine for output
uncer_op1 = dict({'SD_interpol': f_op1, 'sem': optode1_sem, 'sample size': n1})
else:
uncer_op1 = None
if isinstance(dfop2, pd.DataFrame):
f_op2 = dict(map(lambda s: (s, interpolation_SD(conc=para['concentration'], dfop=dfop2, s=s, method=method)),
dfop2.columns))
# standard error of the mean
optode2_sem, n2 = sem_optode(dfop=dfop2, RoI=RoI2, conc=conc)
# combine for output
uncer_op2 = dict({'SD_interpol': f_op2, 'sem': optode2_sem, 'sample size': n2})
else:
uncer_op2 = None
return uncer_op1, uncer_op2
def lin_propagation(dfop1, dfop2, n1, n2, RoI1, RoI2, conc):
# normalization
if RoI1: # optode 1
optode1_norm = optode_normalization(dfop=dfop1)
else:
optode1_norm = None
if RoI2: # optode 2
optode2_norm = optode_normalization(dfop=dfop2)
else:
optode2_norm = None
optode_norm = [optode1_norm, optode2_norm]
# standard error of the mean
if RoI1:
optode1_norm_SEM = dict(map(lambda s: (s, pd.DataFrame([i.s / n1 for i in optode1_norm[s]],
index=optode1_norm[s].index)), optode1_norm.keys()))
# interpolation for SD
fnorm_op1 = dict(map(lambda s: (s, interpolation_SD(conc=conc, dfop=optode1_norm, s=s)), optode1_norm.keys()))
else:
optode1_norm_SEM, fnorm_op1 = None, None
if RoI2:
optode2_norm_SEM = dict(map(lambda s: (s, pd.DataFrame([i.s / n2 for i in optode2_norm[s]],
index=optode2_norm[s].index)), optode2_norm.keys()))
# interpolation for SD
fnorm_op2 = dict(map(lambda s: (s, interpolation_SD(conc=conc, dfop=optode2_norm, s=s)), optode2_norm.keys()))
else:
optode2_norm_SEM, fnorm_op2 = None, None
return optode_norm, optode1_norm_SEM, optode2_norm_SEM, fnorm_op1, fnorm_op2
def mc_propagation(conc, dfop, optode_norm, optode_norm_SEM, RoI, uncer_op):
dic_optode_value = dict()
dic_optode_param = dict()
for s in dfop.columns:
if RoI:
[dic_optode_param[s], dic_optode_value[s]] = mcerp_simplifiedSVFit(optode=optode_norm[s].to_numpy(),
conc=conc)
for s in dic_optode_param.keys():
if RoI:
dic_optode_param[s]['I0'] = dfop.loc[0][s]
# -------------------------------------------------------------------
# uncertainty propagation for SEM
# intensity
iratio_normSEM = dict(map(lambda s: (s, pd.Series([ufloat(optode_norm[s].loc[c].n, optode_norm_SEM[s].loc[c][0])
for c in optode_norm[s].index], index=optode_norm[s].index,
name=s)), optode_norm.keys()))
# concentration
ox_normSEM = dict(map(lambda s: (s, dic_optode_value[s]['O2 SD'] / uncer_op['sample size']),
dic_optode_value.keys()))
optode_normSEM = dict(map(lambda s: (s, pd.concat([ox_normSEM[s], iratio_normSEM[s]], axis=1,
keys=['O2 SEM', 'iratio SEM'])), iratio_normSEM.keys()))
return dic_optode_value, dic_optode_param, optode_normSEM
# =====================================================================================
def _simplifiedSV(x, f, k):
"""
    fitting function according to the common two-site model. In general, x represents the pO2 or pCO2 content,
    whereas k and f are the common fitting parameters
:param x: list
:param k: np.float
:param f: np.float
:return: iratio: normalized signal i0/i
"""
return 1 / (f / (1. + k*x) + (1.-f))
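# Illustrative check (not part of the original module): at 0 %air the model returns 1 by
# construction; with increasing O2 the returned ratio i0/i grows as the luminescence is quenched.
def _example_simplifiedSV():
    i0_i_at_0 = _simplifiedSV(x=0., f=0.887, k=0.165)      # -> 1.0
    i0_i_at_100 = _simplifiedSV(x=100., f=0.887, k=0.165)  # > 1
    return i0_i_at_0, i0_i_at_100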
def _simplified_SVFit_1run(data, conc, par0=None):
simply_sv = Model(_simplifiedSV)
if par0:
params_sens = simply_sv.make_params(k=par0['k'], f=par0['f'])
else:
params_sens = simply_sv.make_params(k=0.165, f=0.887)
params_sens['k'].min = 0.
params_sens['f'].max = 1.
params_sens['f'].vary = True
params_sens['k'].vary = True
# use i0/i data for fit and re-calculate i afterwards
# full concentration range
result = simply_sv.fit(data, params_sens, x=conc, nan_policy='omit')
return result
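# Illustrative sketch (not part of the original workflow; data are synthetic): fit the simplified
# two-site model to slightly perturbed calibration points. Assumes lmfit's Model is imported at
# the top of this module, as it is used by _simplified_SVFit_1run.
def _example_simplifiedSV_fit():
    conc = np.array([0., 20., 40., 60., 80., 100.])
    data = _simplifiedSV(conc, f=0.9, k=0.15) * (1 + 0.005 * np.sin(conc))
    res = _simplified_SVFit_1run(data=data, conc=conc, par0={'k': 0.1, 'f': 0.8})
    return res.params['k'].value, res.params['f'].value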
def mcerp_simplifiedSVFit(optode, conc):
# use simplifiedSV_run1 to calculate best fit
res = _simplified_SVFit_1run(data=[i.n for i in optode], conc=conc)
# evaluate the covariance matrix of your parameters
covariance = res.covar
# draw random samples from a normal multivariate distribution using the best value of your parameters
# and their covariance matrix
f = N(res.params['f'].value, res.params['f'].stderr**2) # stderr**2 := cov(f,f)
k = N(res.params['k'].value, res.params['k'].stderr**2)
y = [N(o.n, o.s) for o in optode]
params = dict({'f': f, 'k': k, 'covariance': covariance, 'fit result': res})
# calculate x for each point of the sample
O2_calc = O2_analysis_v2(f=f, k=k, iratio=y)
# estimate the mean and standard deviation of x
ox_out = [(O2_calc[ox].mean, np.sqrt(O2_calc[ox].var), optode[ox].n, optode[ox].s) for ox in range(len(conc))]
out = pd.DataFrame(ox_out, index=conc, columns=['O2 mean', 'O2 SD', 'iratio mean', 'iratio SD'])
return params, out
def o2_calculation(inp, dict_ratio_run1, dict_ratio_run2, dpara, surface, px2mm, splitdata=True, run=2, vmin=-50,
vmax=150):
o, s, run = inp.split(',')[0].strip(), inp.split(',')[1].strip(), int(run)
if run == 1:
dratio = dict_ratio_run1
else:
dratio = dict_ratio_run2
dO2_calc = dict()
for o in dratio.keys():
if dratio[o]:
dic_cal = dict(map(lambda s:
(s, O2_analysis_area(para=dpara[o][s], iratio=dratio[o][s])), dratio[o].keys()))
dO2_calc[o] = dic_cal
# post-processing
dO2, dO2_av, dO2_SD = postprocessing_v1(dO2_calc=dO2_calc, px2mm=px2mm, surface=surface, split=splitdata, vmin=vmin,
vmax=vmax)
return dO2, dO2_av, dO2_SD
def O2_analysis_v2(f, k, iratio):
"""
    :param f: mcerp.UncertainVariable containing a normally distributed sample of values around the best value of the
              fit parameter f and its covariance value as sigma
    :param k: mcerp.UncertainVariable containing a normally distributed sample of values around the best value of the
              fit parameter k and its covariance value as sigma
    :param iratio: list of mcerp.UncertainVariables containing a normally distributed sample of the intensity ratio
                   (mu is the average value and sigma is the propagated error)
    :return x: list of the calculated O2 concentrations
"""
# mean O2 concentration
x = [1/k * (f / ((1/y) + f -1) -1) for y in iratio]
return x
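# Illustrative sketch (not part of the original pipeline): the inversion also works for plain
# floats; in the actual workflow f, k and iratio are mcerp / uncertainties objects, but the
# arithmetic is identical.
def _example_O2_analysis_v2():
    f, k = 0.887, 0.165
    iratio = [_simplifiedSV(x, f=f, k=k) for x in (0., 50., 100.)]
    return O2_analysis_v2(f=f, k=k, iratio=iratio)   # recovers ~0, 50 and 100 %air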
def O2_analysis_area(para, iratio, iratio_std=None, int_type='norm'):
"""
    :param para: pd.DataFrame with the fit parameters 'f', 'k' (and 'I0' for raw intensities) as index; the first
                 column holds the mean value and the second column the standard deviation
    :param iratio: array or pd.DataFrame of intensity ratios, either already normalized to I0 ('norm') or raw
    :param iratio_std: standard deviation of the intensity ratio; required when int_type == 'norm'
    :param int_type: 'norm' if iratio is already normalized to I0, otherwise iratio is divided by I0 taken from para
    :return df_x: pd.DataFrame of the calculated O2 concentration with propagated uncertainties
"""
# create ufloat for uncertainty propagation via parameter
f_mp = ufloat(para.loc['f'][0], para.loc['f'][1])
k_mp = ufloat(para.loc['k'][0], para.loc['k'][1])
if int_type == 'norm':
int_arr = unumpy.uarray(np.array(iratio.to_numpy()), np.array(iratio_std.to_numpy()))
else:
i0_mp = ufloat(para.loc['I0'][0], para.loc['I0'][1])
if isinstance(iratio, (np.ndarray, np.generic)):
iratio_arr = unumpy.uarray(iratio, np.array(np.zeros(shape=(iratio.shape))))
else:
iratio_arr = unumpy.uarray(iratio.values, np.array(np.zeros(shape=(iratio.shape))))
int_arr = iratio_arr / i0_mp
# intermediate value calculation for x = 1/k * (np.divide(f, np.divide(1, inorm) + f - 1) - 1)
a = int_arr + f_mp - 1
b = f_mp / a - 1
# final O2 concentration
x = 1 / k_mp * b
df_x = pd.DataFrame(x, index=pd.DataFrame(iratio).index, columns=pd.DataFrame(iratio).columns)
return df_x
# =====================================================================================
def fsigmoid(x, a, b, c):
return c / (1.0 + np.exp(-a * (x - b)))
def interpolation_microsensor(df_ms, profile_ex):
smodel = Model(fsigmoid)
# interpolation of microsensor to step width of optode
params = smodel.make_params(a=-15, b=1, c=50)
res_ms = smodel.fit(df_ms.loc[1:16, :]['Intensity'].to_numpy(), x=df_ms.loc[1:16, :]['Depth (mm)'].to_numpy(),
params=params)
xnew = profile_ex.index
ydata = fsigmoid(x=xnew, a=res_ms.best_values['a'], b=res_ms.best_values['b'], c=res_ms.best_values['c'])
data_ms = pd.DataFrame(ydata, index=xnew)
data_ms.columns = ['microsensor']
return data_ms
def geometric_intersection(treshold, dd, column):
# generate curve
second_line = LineString(np.column_stack((dd.index, [treshold]*dd.shape[0])))
first_line = LineString(np.column_stack((dd.index, dd[column].to_numpy())))
# geometric determination of intersection points
intersection = first_line.intersection(second_line)
try:
xdata = LineString(intersection).xy
except:
xdata = intersection.xy
return xdata
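# Illustrative sketch (not part of the original evaluation; the profile is made up): intersect a
# monotonic O2 depth profile with a horizontal threshold to read off the penetration depth.
# Assumes shapely's LineString is imported at the top of this module, as it is used by
# geometric_intersection.
def _example_geometric_intersection():
    profile = pd.DataFrame({'mean': [100., 75., 50., 25., 0.]}, index=[0., 0.5, 1., 1.5, 2.])
    xdata = geometric_intersection(treshold=60., dd=profile, column='mean')
    return xdata[0][0]   # -> 0.8, the depth at which the profile crosses 60 %air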
def penetration_depth(dO2_lp, ls_kernel, df_ms, treshold):
# combine relevant line profiles
dprofile = dict()
for kshape in ls_kernel:
if len(dO2_lp[kshape]['square'].keys()) != 0:
depth = pd.concat([dO2_lp[kshape]['vertical'][0], dO2_lp[kshape]['horizontal'][0],
dO2_lp[kshape]['square'][0]], axis=1)
col = dO2_lp[kshape].keys()
else:
depth = pd.concat([dO2_lp[kshape]['vertical'][0], dO2_lp[kshape]['horizontal'][0]], axis=1)
col = ['vertical', 'horizontal']
depth.columns = [i + '-' + j for i in col for j in ['mean', 'SD']]
dprofile[kshape] = depth
# exponential decay for interpolation of micro-sensor data close to the transition
data_ms = interpolation_microsensor(df_ms=df_ms, profile_ex=dprofile[ls_kernel[0]])
# geometric intersection of line profile and O2 threshold for penetration depth
dd = dict(map(lambda k: (k, pd.concat([dprofile[k].filter(like='mean'), data_ms], axis=1)), ls_kernel))
# minimal line profile
dd_min = dict(map(lambda k: (k, pd.concat([pd.DataFrame([dprofile[k][c + '-mean'] - dprofile[k][c + '-SD']
for c in col], index=col).T, data_ms['microsensor']],
axis=1)), ls_kernel))
# maximal line profile
dd_max = dict(map(lambda k: (k, pd.concat([pd.DataFrame([dprofile[k][c + '-mean'] + dprofile[k][c + '-SD']
for c in col], index=col).T, data_ms['microsensor']],
axis=1)), ls_kernel))
ydepth = pd.concat([pd.DataFrame([geometric_intersection(treshold=treshold, dd=dd[k], column=d)[0][0]
for d in dd[k].columns], index=dd[k].columns) for k in ls_kernel], axis=1).T
ydepth_min = pd.concat([pd.DataFrame([geometric_intersection(treshold=treshold, dd=dd_min[k], column=d)[0][0]
for d in dd_min[k].columns], index=dd_min[k].columns) for k in ls_kernel],
axis=1).T
ydepth_max = pd.concat([pd.DataFrame([geometric_intersection(treshold=treshold, dd=dd_max[k], column=d)[0][0]
for d in dd_max[k].columns], index=dd_max[k].columns) for k in ls_kernel],
axis=1).T
ydepth.index, ydepth_min.index, ydepth_max.index = ls_kernel, ls_kernel, ls_kernel
ydepth.columns = [i.split('-')[0] for i in ydepth.columns]
return ydepth, ydepth_min, ydepth_max
# =====================================================================================
def saving_res(save_name, conc, crop_op, RoI1_av, RoI2_av, df_initial, df_norm, dop1_param, dop2_param, dop1_value,
dop2_value, op1_normSEM, op2_normSEM):
# open h5 file
f = h5py.File(save_name, "w")
# -----------------------------
# [group creation]
# header
grp_header = f.create_group('header')
supgrp_nRoI = grp_header.create_group("Pixels for optode")
supgrp_nRoI1 = supgrp_nRoI.create_group("optode1")
supgrp_nRoI2 = supgrp_nRoI.create_group("optode2")
supgrp_RoI = grp_header.create_group("RoI for optode")
supgrp_RoI1 = supgrp_RoI.create_group("optode1")
supgrp_RoI2 = supgrp_RoI.create_group("optode2")
supgrp_conc = grp_header.create_group("concentration point")
# data group
grp_data = f.create_group("data")
supgrp_av = grp_data.create_group('averaged')
supgrp_av1 = supgrp_av.create_group('optode1')
supgrp_av2 = supgrp_av.create_group('optode2')
supgrp_norm = grp_data.create_group('normalized')
supgrp_norm1 = supgrp_norm.create_group('optode1')
supgrp_norm2 = supgrp_norm.create_group('optode2')
# group related to fit process
grp_fit = f.create_group("fit")
supgrp_params = grp_fit.create_group("parameter")
supgrp_params1 = supgrp_params.create_group('optode1')
supgrp_params2 = supgrp_params.create_group('optode2')
supgrp_cov = grp_fit.create_group("covariance matrix")
supgrp_cov1 = supgrp_cov.create_group('optode1')
supgrp_cov2 = supgrp_cov.create_group('optode2')
supgrp_chi = grp_fit.create_group("reduced chi-square")
supgrp_chi1 = supgrp_chi.create_group('optode1')
supgrp_chi2 = supgrp_chi.create_group('optode2')
supgrp_values = grp_fit.create_group("values")
supgrp_values1 = supgrp_values.create_group('optode1')
supgrp_v1_o2av = supgrp_values1.create_group('O2 mean')
supgrp_v1_o2sd = supgrp_values1.create_group('O2 SD')
supgrp_v1_o2sem = supgrp_values1.create_group('O2 SEM')
supgrp_v1_iav = supgrp_values1.create_group('iratio mean')
supgrp_v1_isd = supgrp_values1.create_group('iratio SD')
supgrp_v1_isem = supgrp_values1.create_group('iratio SEM')
supgrp_values2 = supgrp_values.create_group('optode2')
supgrp_v2_o2av = supgrp_values2.create_group('O2 mean')
supgrp_v2_o2sd = supgrp_values2.create_group('O2 SD')
supgrp_v2_o2sem = supgrp_values2.create_group('O2 SEM')
supgrp_v2_iav = supgrp_values2.create_group('iratio mean')
supgrp_v2_isd = supgrp_values2.create_group('iratio SD')
supgrp_v2_isem = supgrp_values2.create_group('iratio SEM')
# --------------------------------------------------------
# [fill groups]
# --------------------------------------------------------
# header
# Pixels for optode
supgrp_nRoI1.create_dataset('RoI1', data=np.array(crop_op[0]))
supgrp_nRoI2.create_dataset('RoI2', data=np.array(crop_op[1]))
# concentration
supgrp_conc.create_dataset('concentration', data=conc)
# RoI within optode
supgrp_RoI1.create_dataset('RoI1', data=np.array(RoI1_av))
supgrp_RoI2.create_dataset('RoI1', data=np.array(RoI2_av))
# ------------------------------
# data
# supgroup - averaged data
for s in df_initial[0].columns:
v = np.array([[i.n for i in df_initial[0][s].values], [i.s for i in df_initial[0][s].values]])
supgrp_av1.create_dataset(str(s), data=np.array(v))
for s in df_initial[1].columns:
v = np.array([[i.n for i in df_initial[1][s].values], [i.s for i in df_initial[1][s].values]])
supgrp_av2.create_dataset(str(s), data=np.array(v))
# ------------------------------
# supgroup - normalized data
for s in df_norm[0].keys():
v = [[i.n for i in df_norm[0][s].values], [i.s for i in df_norm[0][s].values]]
supgrp_norm1.create_dataset(str(s), data=np.array(v))
for s in df_norm[1].keys():
v = [[i.n for i in df_norm[1][s].values], [i.s for i in df_norm[1][s].values]]
supgrp_norm2.create_dataset(str(s), data=np.array(v))
# ------------------------------
# supgroup - fit parameters
for s in dop1_param.keys():
v = [(dop1_param[s][l].mean, dop1_param[s][l].std) for l in ['f', 'k']]
v += [(dop1_param[s]['I0'].n, dop1_param[s]['I0'].s)]
supgrp_params1.create_dataset(str(s), data=np.array(v))
for s in dop2_param.keys():
v = [(dop2_param[s][l].mean, dop2_param[s][l].std) for l in ['f', 'k']]
v += [(dop2_param[s]['I0'].n, dop2_param[s]['I0'].s)]
supgrp_params2.create_dataset(str(s), data=np.array(v))
# ------------------------------
# supgroup - covariance matrix
for s in dop1_param.keys():
supgrp_cov1.create_dataset(str(s), data=np.array(dop1_param[s]['covariance']))
for s in dop2_param.keys():
supgrp_cov2.create_dataset(str(s), data=np.array(dop2_param[s]['covariance']))
# ------------------------------
# supgroup - reduces chi-square
for s in dop1_param.keys():
        supgrp_chi1.create_dataset(str(s), data=np.array(dop1_param[s]['fit result'].redchi))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 12:13:33 2018
@author: <NAME> (<EMAIL> / <EMAIL>)
"""
#Python dependencies
from __future__ import division
import pandas as pd
import numpy as np
from scipy.constants import codata
from pylab import *
from scipy.optimize import curve_fit
import mpmath as mp
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit
#from scipy.optimize import leastsq
pd.options.mode.chained_assignment = None
#Plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import seaborn as sns
import matplotlib.ticker as mtick
mpl.rc('mathtext', fontset='stixsans', default='regular')
mpl.rcParams.update({'axes.labelsize':22})
mpl.rc('xtick', labelsize=16)
mpl.rc('ytick', labelsize=16)
mpl.rc('legend',fontsize=14)
from scipy.constants import codata
F = codata.physical_constants['Faraday constant'][0]
Rg = codata.physical_constants['molar gas constant'][0]
### Importing PyEIS add-ons
from .PyEIS_Data_extraction import *
from .PyEIS_Lin_KK import *
from .PyEIS_Advanced_tools import *
### Frequency generator
##
#
def freq_gen(f_start, f_stop, pts_decade=7):
'''
    Frequency Generator with log-spaced frequencies
Inputs
----------
f_start = frequency start [Hz]
f_stop = frequency stop [Hz]
pts_decade = Points/decade, default 7 [-]
Output
----------
[0] = frequency range [Hz]
[1] = Angular frequency range [1/s]
'''
f_decades = np.log10(f_start) - np.log10(f_stop)
f_range = np.logspace(np.log10(f_start), np.log10(f_stop), num=np.around(pts_decade*f_decades).astype(int), endpoint=True)
w_range = 2 * np.pi * f_range
return f_range, w_range
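# Illustrative usage sketch (values are arbitrary): a sweep from 100 kHz down to 10 mHz with the
# default 7 points per decade; w_range = 2*pi*f_range.
def _example_freq_gen():
    f_range, w_range = freq_gen(f_start=1e5, f_stop=1e-2, pts_decade=7)
    return f_range, w_range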
### Simulation Element Functions
##
#
def elem_L(w, L):
'''
Simulation Function: -L-
Returns the impedance of an inductor
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Inductance [ohm * s]
'''
return 1j*w*L
def elem_C(w,C):
'''
Simulation Function: -C-
Inputs
----------
w = Angular frequency [1/s]
C = Capacitance [F]
'''
return 1/(C*(w*1j))
def elem_Q(w,Q,n):
'''
Simulation Function: -Q-
Inputs
----------
w = Angular frequency [1/s]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
'''
return 1/(Q*(w*1j)**n)
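# Illustrative check (not part of the original library): with n = 1 the constant phase element
# reduces to an ideal capacitor, so elem_Q and elem_C coincide.
def _example_elem_Q():
    w = freq_gen(1e5, 1e-2)[1]
    return np.allclose(elem_Q(w, Q=1e-5, n=1), elem_C(w, C=1e-5))   # -> True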
### Simulation Circuit Functions
##
#
def cir_RsC(w, Rs, C):
'''
Simulation Function: -Rs-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
C = Capacitance [F]
'''
return Rs + 1/(C*(w*1j))
def cir_RsQ(w, Rs, Q, n):
'''
Simulation Function: -Rs-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
'''
return Rs + 1/(Q*(w*1j)**n)
def cir_RQ(w, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -RQ-
    Return the impedance of an RQ circuit (a resistor R in parallel with a constant phase element Q). See details
    for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return (R/(1+R*Q*(w*1j)**n))
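# Illustrative usage sketch (values are arbitrary): an (RQ) element parameterised by its summit
# frequency; Q is back-calculated internally from R, n and fs. For n = 1 the imaginary part
# peaks at -R/2 at the summit frequency.
def _example_cir_RQ():
    f_range, w_range = freq_gen(1e5, 1e-2)
    return cir_RQ(w_range, R=100., Q='none', n=0.9, fs=1e2)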
def cir_RsRQ(w, Rs='none', R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return Rs + (R/(1+R*Q*(w*1j)**n))
def cir_RC(w, C='none', R='none', fs='none'):
'''
Simulation Function: -RC-
    Returns the impedance of an RC circuit, using the RQ definitions with n=1. See cir_RQ() for details
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
C = Capacitance [F]
fs = Summit frequency of RC circuit [Hz]
'''
return cir_RQ(w, R=R, Q=C, n=1, fs=fs)
def cir_RsRQRQ(w, Rs, R='none', Q='none', n='none', fs='none', R2='none', Q2='none', n2='none', fs2='none'):
'''
Simulation Function: -Rs-RQ-RQ-
    Return the impedance of an Rs-RQ-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
R2 = Resistance [Ohm]
Q2 = Constant phase element [s^n/ohm]
n2 = Constant phase element exponent [-]
fs2 = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if R2 == 'none':
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif Q2 == 'none':
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif n2 == 'none':
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_RsRQQ(w, Rs, Q, n, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
    n1 = Constant phase element exponent in (RQ) circuit [-]
fs1 = Summit frequency of RQ circuit [Hz]
Q = Constant phase element of series Q [s^n/ohm]
    n = Constant phase element exponent of series Q [-]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_Q(w,Q,n)
def cir_RsRQC(w, Rs, C, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
    n1 = Constant phase element exponent in (RQ) circuit [-]
fs1 = summit frequency of RQ circuit [Hz]
C = Constant phase element of series Q [s^n/ohm]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_C(w, C=C)
def cir_RsRCC(w, Rs, R1, C1, C):
'''
Simulation Function: -Rs-RC-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
C1 = Constant phase element in (RQ) circuit [s^n/ohm]
C = Capacitance of series C [s^n/ohm]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ(w, Rs, R1, C1, Q, n):
'''
Simulation Function: -Rs-RC-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
C1 = Constant phase element in (RQ) circuit [s^n/ohm]
Q = Constant phase element of series Q [s^n/ohm]
    n = Constant phase element exponent of series Q [-]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
def Randles_coeff(w, n_electron, A, E='none', E0='none', D_red='none', D_ox='none', C_red='none', C_ox='none', Rg=Rg, F=F, T=298.15):
'''
Returns the Randles coefficient sigma [ohm/s^1/2].
    Two cases: a) ox and red are both present in solution, here both C_red and D_red are defined; b) in the particular
    case where initially only Ox species are present in the solution with bulk concentration C*_ox, the surface
    concentrations may be calculated as a function of the electrode potential following the Nernst equation. Here
    C_red and D_red == 'none'
Ref.:
- <NAME>., ISBN: 978-1-4614-8932-0, "Electrochemical Impedance Spectroscopy and its Applications"
- <NAME>., ISBN: 0-471-04372-9, <NAME>. R. (2001) "Electrochemical methods: Fundamentals and applications". New York: Wiley.
<NAME> (<EMAIL> // <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
    D_ox = Diffusion coefficient of oxidized species [cm2/s]
    D_red = Diffusion coefficient of reduced species [cm2/s]
    C_ox = Bulk concentration of oxidized species [mol/cm3]
    C_red = Bulk concentration of reduced species [mol/cm3]
T = Temperature [K]
Rg = Gas constant [J/molK]
    F = Faraday's constant [C/mol]
E = Potential [V]
if reduced specie is absent == 'none'
E0 = formal potential [V]
if reduced specie is absent == 'none'
Returns
----------
Randles coefficient [ohm/s^1/2]
'''
if C_red != 'none' and D_red != 'none':
sigma = ((Rg*T) / ((n_electron**2) * A * (F**2) * (2**(1/2)))) * ((1/(D_ox**(1/2) * C_ox)) + (1/(D_red**(1/2) * C_red)))
elif C_red == 'none' and D_red == 'none' and E!='none' and E0!= 'none':
f = F/(Rg*T)
x = (n_electron*f*(E-E0))/2
func_cosh2 = (np.cosh(2*x)+1)/2
sigma = ((4*Rg*T) / ((n_electron**2) * A * (F**2) * C_ox * ((2*D_ox)**(1/2)) )) * func_cosh2
else:
        raise ValueError('define E and E0')  # sigma cannot be calculated otherwise
Z_Aw = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Z_Aw
def cir_Randles(w, n_electron, D_red, D_ox, C_red, C_ox, Rs, Rct, n, E, A, Q='none', fs='none', E0=0, F=F, Rg=Rg, T=298.15):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a Randles circuit with the full complexity of the Warburg constant
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
    D_ox = Diffusion coefficient of oxidized species [cm2/s]
    D_red = Diffusion coefficient of reduced species [cm2/s]
    C_ox = Concentration of oxidized species [mol/cm3]
    C_red = Concentration of reduced species [mol/cm3]
T = Temperature [K]
Rg = Gas constant [J/molK]
    F = Faraday's constant [C/mol]
E = Potential [V]
if reduced specie is absent == 'none'
E0 = Formal potential [V]
if reduced specie is absent == 'none'
Rs = Series resistance [ohm]
Rct = charge-transfer resistance [ohm]
Q = Constant phase element used to model the double-layer capacitance [F]
    n = exponent of the CPE [-]
Returns
----------
The real and imaginary impedance of a Randles circuit [ohm]
'''
Z_Rct = Rct
Z_Q = elem_Q(w,Q,n)
Z_w = Randles_coeff(w, n_electron=n_electron, E=E, E0=E0, D_red=D_red, D_ox=D_ox, C_red=C_red, C_ox=C_ox, A=A, T=T, Rg=Rg, F=F)
return Rs + 1/(1/Z_Q + 1/(Z_Rct+Z_w))
def cir_Randles_simplified(w, Rs, R, n, sigma, Q='none', fs='none'):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a simplified Randles circuit, where the Warburg element is described by the
    coefficient sigma
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
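# Illustrative usage sketch (values are arbitrary): a simplified Randles cell with Warburg
# coefficient sigma over a standard sweep; Z.real vs -Z.imag gives the Nyquist plot.
def _example_cir_Randles_simplified():
    f_range, w_range = freq_gen(1e5, 1e-1)
    return cir_Randles_simplified(w=w_range, Rs=10., R=250., n=0.85, sigma=15., Q=1e-5)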
# Polymer electrolytes
def cir_C_RC_C(w, Ce, Cb='none', Rb='none', fsb='none'):
'''
Simulation Function: -C-(RC)-C-
    This circuit is often used for modeling blocking electrodes with a polymeric electrolyte, which exhibits an
    immobile ionic species in the bulk that gives a capacitance contribution to the otherwise resistive electrolyte
Ref:
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London, Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Ce = Interfacial capacitance [F]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = summit frequency of bulk (RC) circuit [Hz]
'''
Z_C = elem_C(w,C=Ce)
Z_RC = cir_RC(w, C=Cb, R=Rb, fs=fsb)
return Z_C + Z_RC
def cir_Q_RQ_Q(w, Qe, ne, Qb='none', Rb='none', fsb='none', nb='none'):
'''
Simulation Function: -Q-(RQ)-Q-
Modified cir_C_RC_C() circuits that can be used if electrodes and bulk are not behaving like ideal capacitors
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Qe = Interfacial capacitance modeled with a CPE [F]
ne = Interfacial constant phase element exponent [-]
Rb = Bulk/series resistance [Ohm]
Qb = Bulk capacitance modeled with a CPE [s^n/ohm]
nb = Bulk constant phase element exponent [-]
fsb = summit frequency of bulk (RQ) circuit [Hz]
'''
Z_Q = elem_Q(w,Q=Qe,n=ne)
Z_RQ = cir_RQ(w, Q=Qb, R=Rb, fs=fsb, n=nb)
return Z_Q + Z_RQ
def tanh(x):
'''
    As numpy gives errors when tanh becomes very large, above 10^250, this function is used instead of np.tanh()
'''
return (1-np.exp(-2*x))/(1+np.exp(-2*x))
def cir_RCRCZD(w, L, D_s, u1, u2, Cb='none', Rb='none', fsb='none', Ce='none', Re='none', fse='none'):
'''
Simulation Function: -RC_b-RC_e-Z_D
    This circuit has been used to study non-blocking electrodes with an ionically conducting electrolyte containing a
    mobile and an immobile ionic species in the bulk, mixed with an ionically conducting salt. This behavior yields an
    impedance response that consists of the interfacial impedance -(RC_e)-, the ionically conducting polymer -(RC_b)-,
    and the diffusional impedance from the dissolved salt.
Refs.:
- <NAME>. and <NAME>., Electrochimica Acta, 27, 1671-1675, 1982, "Conductivity, Charge Transfer and Transport number - An AC-Investigation
of the Polymer Electrolyte LiSCN-Poly(ethyleneoxide)"
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London
Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Thickness of electrode [cm]
D_s = Diffusion coefficient of dissolved salt [cm2/s]
u1 = Mobility of the ion reacting at the electrode interface
u2 = Mobility of other ion
Re = Interfacial resistance [Ohm]
Ce = Interfacial capacitance [F]
fse = Summit frequency of the interfacial (RC) circuit [Hz]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = Summit frequency of the bulk (RC) circuit [Hz]
'''
Z_RCb = cir_RC(w, C=Cb, R=Rb, fs=fsb)
Z_RCe = cir_RC(w, C=Ce, R=Re, fs=fse)
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(x=alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ(w, Rs, L, Ri, Q='none', n='none'):
'''
Simulation Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = exponent for the interfacial capacitance [-]
'''
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_TLsQ
def cir_RsRQTLsQ(w, Rs, R1, fs1, n1, L, Ri, Q, n, Q1='none'):
'''
Simulation Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance(Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = Exponent for the interfacial capacitance [-]
Output
-----------
    Impedance of Rs-(RQ)1-TLsQ
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs(w, Rs, L, Ri, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
R = Interfacial Charge transfer resistance [ohm*cm]
fs = Summit frequency of interfacial RQ circuit [Hz]
n = Exponent for interfacial RQ circuit [-]
Q = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-TLs(RQ)
'''
Phi = cir_RQ(w, R, Q, n, fs)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs(w, Rs, L, Ri, R1, n1, fs1, R2, n2, fs2, Q1='none', Q2='none'):
'''
Simulation Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/(ohm * cm)]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
R2 = Interfacial Charge transfer resistance [ohm*cm]
fs2 = Summit frequency of interfacial RQ circuit [Hz]
n2 = Exponent for interfacial RQ circuit [-]
Q2 = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-(RQ)1-TLs(RQ)2
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = cir_RQ(w=w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
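# Illustrative usage sketch (not part of the library): the simulation functions above take a numpy
# array of angular frequencies and return a complex impedance array. The parameter values below are
# hypothetical, chosen only to show the call signature of cir_RsRQTLs().
#
# f = np.logspace(-2, 6, 100) #frequencies from 10 mHz to 1 MHz
# w = 2*np.pi*f #angular frequencies [rad/s]
# Z = cir_RsRQTLs(w=w, Rs=10, L=0.01, Ri=100, R1=50, n1=0.9, fs1=1000, R2=200, n2=0.85, fs2=10)
# Z.real, -Z.imag #real and (negative) imaginary parts, e.g. for a Nyquist plot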
### Support function
def sinh(x):
'''
As numpy gives errors when sinh becomes very large, above 10^250, this function is used instead of np/mp.sinh()
'''
return (1 - np.exp(-2*x))/(2*np.exp(-x))
def coth(x):
'''
As numpy gives errors when coth becomes very large, above 10^250, this function is used instead of np/mp.coth()
'''
return (1 + np.exp(-2*x))/(1 - np.exp(-2*x))
###
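# Note on the support functions above (worked identities, added for clarity):
# sinh(x) = (e^x - e^-x)/2 = (1 - e^(-2x)) / (2*e^(-x))
# coth(x) = (e^x + e^-x) / (e^x - e^-x) = (1 + e^(-2x)) / (1 - e^(-2x))
# In the rewritten coth form both numerator and denominator stay close to 1 for Re(x) > 0, which avoids
# the overflow np.cosh/np.sinh hit for the large |x| = |L/Lam| values reached at high frequency.
# sinh(x) only ever appears as 2*Lam/sinh(x) in the transmission-line expressions below, so a very
# large sinh simply drives that term toward zero.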
def cir_RsTLQ(w, L, Rs, Q, n, Rel, Ri):
'''
Simulation Function: -R-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
n = exponent for interfacial RQ element [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TLQ
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
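# For reference, the transmission-line term computed above (and in the remaining full-complexity TL
# functions below) is
# Z_TL = (Rel*Ri/(Rel+Ri)) * (L + 2*Lam/sinh(L/Lam)) + Lam * (Rel^2 + Ri^2)/(Rel+Ri) * coth(L/Lam)
# with the characteristic length Lam = sqrt(Phi/(Rel+Ri)), where Phi is the interfacial impedance.
# This is the porous-electrode model of the references listed in the docstrings.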
def cir_RsRQTLQ(w, L, Rs, Q, n, Rel, Ri, R1, n1, fs1, Q1='none'):
'''
Simulation Function: -R-RQ-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
n = exponent for interfacial RQ element [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-RQ-TLQ
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL(w, L, Rs, R, fs, n, Rel, Ri, Q='none'):
'''
Simulation Function: -R-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = Interfacial charge transfer resistance [ohm * cm]
fs = Summit frequency for the interfacial RQ element [Hz]
n = Exponent for interfacial RQ element [-]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
Rel = Electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = Thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R, Q=Q, n=n, fs=fs)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL(w, L, Rs, R1, fs1, n1, R2, fs2, n2, Rel, Ri, Q1='none', Q2='none'):
'''
Simulation Function: -R-RQ-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
R2 = interfacial charge transfer resistance [ohm * cm]
fs2 = Summit frequency for the interfacial RQ element [Hz]
n2 = exponent for interfacial RQ element [-]
Q2 = Constant phase element for the interfacial capacitance [s^n/ohm]
Rel = electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-RQ-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
# Transmission lines with solid-state transport
def cir_RsTL_1Dsolid(w, L, D, radius, Rs, R, Q, n, R_w, n_w, Rel, Ri):
'''
Simulation Function: -R-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = particle charge transfer resistance [ohm*cm^2]
Q = Constant phase element in the modified Randles element of a particle [s^n/ohm]
n = Exponent for the constant phase element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
--------------
Impedance of Rs-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w,Q=Q,n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
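# For reference, the 1D finite-space Warburg element computed above is
# Z_w = R_w * coth(x)/x with x = (1j*w*tau)^n_w and tau = radius^2/D,
# i.e. tau is the solid-state diffusion time constant of a particle. mpmath's coth is used here
# because x spans many orders of magnitude across the measured frequency range.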
def cir_RsRQTL_1Dsolid(w, L, D, radius, Rs, R1, fs1, n1, R2, Q2, n2, R_w, n_w, Rel, Ri, Q1='none'):
'''
Simulation Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = charge transfer resistance of the interfacial RQ element [ohm*cm^2]
fs1 = Summit frequency of the interfacial RQ element [Hz]
n1 = Exponent for the interfacial RQ element [-]
R2 = particle charge transfer resistance [ohm*cm^2]
Q2 = Constant phase element in the modified Randles element of a particle [s^n/ohm]
n2 = Exponent for the constant phase element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
------------------
Impedance of R-RQ-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ + Z_TL
### Fitting Circuit Functions
##
#
def elem_C_fit(params, w):
'''
Fit Function: -C-
'''
C = params['C']
return 1/(C*(w*1j))
def elem_Q_fit(params, w):
'''
Fit Function: -Q-
Constant Phase Element for Fitting
'''
Q = params['Q']
n = params['n']
return 1/(Q*(w*1j)**n)
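# Illustrative sketch (not part of the library): the _fit functions below expect an lmfit Parameters
# object, consistent with the params.get(...).value calls used later in this module. The parameter
# names ('Rs', 'Q', 'n') and values below are hypothetical, and the lmfit import is an assumption
# based on the minimize() calls further down.
#
# from lmfit import Parameters
# params = Parameters()
# params.add('Rs', value=20, min=0)
# params.add('Q', value=1e-4, min=0)
# params.add('n', value=0.9, min=0, max=1)
# Z_guess = cir_RsQ_fit(params, w=2*np.pi*np.logspace(-1, 5, 50))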
def cir_RsC_fit(params, w):
'''
Fit Function: -Rs-C-
'''
Rs = params['Rs']
C = params['C']
return Rs + 1/(C*(w*1j))
def cir_RsQ_fit(params, w):
'''
Fit Function: -Rs-Q-
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
return Rs + 1/(Q*(w*1j)**n)
def cir_RC_fit(params, w):
'''
Fit Function: -RC-
Returns the impedance of an RC circuit, using RQ definitions where n=1 (n is therefore not a fit parameter here)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none': calculate R from C and fs
Q = params['C']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs))) #n = 1 for an ideal -RC- element
elif str(params.keys())[10:].find("C") == -1: #elif C == 'none': calculate C from R and fs
R = params['R']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs))) #n = 1 for an ideal -RC- element
else: #both R and C are given, fs is not needed
R = params['R']
Q = params['C']
return cir_RQ(w, R=R, Q=Q, n=1, fs='none')
def cir_RQ_fit(params, w):
'''
Fit Function: -RQ-
Return the impedance of an RQ circuit:
Z(w) = R / (1 + R*Q*(j*w)^n)
See explanation of the equations under cir_RQ()
str(params.keys())[10:] extracts the names of the user-defined parameters; if find("X") == -1 the parameter X was not given, so it is treated as 'none' and calculated from the remaining parameters
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
return R/(1+R*Q*(w*1j)**n)
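# Note on the parameter-lookup pattern used throughout the _fit functions: for an lmfit Parameters
# object, str(params.keys()) renders as something like "odict_keys(['Rs', 'Q', 'n', 'fs'])" (the exact
# container prefix depends on the Python/lmfit version - an assumption, not verified here), so the
# [10:] slice strips that prefix and .find("X") == -1 then means "parameter X was not supplied",
# in which case it is recalculated from the remaining parameters, mirroring the 'none' logic of the
# corresponding simulation functions.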
def cir_RsRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n))
def cir_RsRQRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-RQ-
Return the impedance of an Rs-RQ-RQ circuit. See details under cir_RsRQRQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("'R2'") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'Q2'") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'n2'") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("'fs2'") == -1: #elif fs == 'none':
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_Randles_simplified_Fit(params, w):
'''
Fit Function: Randles simplified -Rs-(Q-(RW)-)-
Return the impedance of a Randles circuit. See more under cir_Randles_simplified()
NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> || <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
sigma = params['sigma']
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
def cir_RsRQQ_fit(params, w):
'''
Fit Function: -Rs-RQ-Q-
See cir_RsRQQ() for details
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
Z_Q = 1/(Q*(w*1j)**n)
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_Q
def cir_RsRQC_fit(params, w):
'''
Fit Function: -Rs-RQ-C-
See cir_RsRQC() for details
'''
Rs = params['Rs']
C = params['C']
Z_C = 1/(C*(w*1j))
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_C
def cir_RsRCC_fit(params, w):
'''
Fit Function: -Rs-RC-C-
See cir_RsRCC() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
C = params['C']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ_fit(params, w):
'''
Fit Function: -Rs-RC-Q-
See cir_RsRCQ() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
Q = params['Q']
n = params['n']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
# Polymer electrolytes
def cir_C_RC_C_fit(params, w):
'''
Fit Function: -C-(RC)-C-
See cir_C_RC_C() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Ce = params['Ce']
Z_C = 1/(Ce*(w*1j))
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RC = (Rb/(1+Rb*Cb*(w*1j)))
return Z_C + Z_RC
def cir_Q_RQ_Q_Fit(params, w):
'''
Fit Function: -Q-(RQ)-Q-
See cir_Q_RQ_Q() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Qe = params['Qe']
ne = params['ne']
Z_Q = 1/(Qe*(w*1j)**ne)
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Qb = params['Qb']
nb = params['nb']
fsb = params['fsb']
Rb = (1/(Qb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("Qb") == -1: #elif Q == 'none':
Rb = params['Rb']
nb = params['nb']
fsb = params['fsb']
Qb = (1/(Rb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("nb") == -1: #elif n == 'none':
Rb = params['Rb']
Qb = params['Qb']
fsb = params['fsb']
nb = np.log(Qb*Rb)/np.log(1/(2*np.pi*fsb))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
nb = params['nb']
Qb = params['Qb']
Z_RQ = Rb/(1+Rb*Qb*(w*1j)**nb)
return Z_Q + Z_RQ
def cir_RCRCZD_fit(params, w):
'''
Fit Function: -RC_b-RC_e-Z_D
See cir_RCRCZD() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
if str(params.keys())[10:].find("Re") == -1: #if R == 'none':
Ce = params['Ce']
fse = params['fse']
Re = (1/(Ce*(2*np.pi*fse)))
if str(params.keys())[10:].find("Ce") == -1: #elif Q == 'none':
Re = params['Re']
fse = params['fse']
Ce = (1/(Re*(2*np.pi*fse)))
if str(params.keys())[10:].find("fse") == -1: #elif fs == 'none':
Re = params['Re']
Ce = params['Ce']
Z_RCe = (Re/(1+Re*Ce*(w*1j)))
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RCb = (Rb/(1+Rb*Cb*(w*1j)))
# Mass transport impedance
L = params['L']
D_s = params['D_s']
u1 = params['u1']
u2 = params['u2']
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
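# For reference, the mass-transport element computed above is Z_D = Rb * (u2/u1) * tanh(alpha)/alpha
# with alpha = sqrt(1j*w*L^2/D_s), i.e. a finite-length diffusion impedance scaled by the ratio u2/u1;
# see cir_RCRCZD() for the definitions of L, D_s, u1 and u2.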
# Transmission lines
def cir_RsTLsQ_fit(params, w):
'''
Fit Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsTLsQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
#
# Z_TLsQ = Lam * X1 * coth_mp
Z_TLsQ = Lam * X1 * coth(x)
return Rs + Z_TLsQ
def cir_RsRQTLsQ_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsRQTLsQ
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs_Fit(params, w):
'''
Fit Function: -Rs-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
See more under cir_RsTLs()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = R/(1+R*Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line with a faradaic interfacial impedance (RQ)
See more under cir_RsRQTLs()
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
def cir_RsTLQ_fit(params, w):
'''
Fit Function: -R-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ_fit(params, w):
'''
Fit Function: -R-RQ-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_Fit(params, w):
'''
Fit Function: -R-TL- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
See cir_RsTL() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = (R/(1+R*Q*(w*1j)**n))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_fit(params, w):
'''
Fit Function: -R-RQ-TL- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#
# # The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
elif str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float((mp.coth(x_mp[i]).imag))*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real) + float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real)*1j)
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float((mp.sinh(x_mp[i]).imag))*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-TL(Q(RW))-
Transmission line w/ full complexity
See cir_RsTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R = params['R']
Q = params['Q']
n = params['n']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
Ri = params['Ri']
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w=w, Q=Q, n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel. The Warburg element is specific for 1D solid-state diffusion
See cir_RsRQTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
Ri = params['Ri']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
### Least-Squares error function
def leastsq_errorfunc(params, w, re, im, circuit, weight_func):
'''
Sum of squares error function for the complex non-linear least-squares fitting procedure (CNLS). The fitting routine (lmfit) iterates over this function
until the total sum of squared errors is minimized.
During the minimization the fit is weighted, and currently three different weighting options are available:
- modulus
- unity
- proportional
Modulus is generally recommended, as random errors and a bias can exist in the experimental data.
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------
- params: parameters needed for CNLS
- re: real impedance
- im: Imaginary impedance
- circuit:
The available circuits are shown below; this parameter must be given as a string.
- C
- Q
- R-C
- R-Q
- RC
- RQ
- R-RQ
- R-RQ-RQ
- R-RQ-Q
- R-(Q(RW))
- R-(Q(RM))
- R-RC-C
- R-RC-Q
- R-RQ-C
- C-RC-C
- Q-RQ-Q
- RC-RC-ZD
- R-TLsQ
- R-RQ-TLsQ
- R-TLs
- R-RQ-TLs
- R-TLQ
- R-RQ-TLQ
- R-TL
- R-RQ-TL
- R-TL1Dsolid (reactive interface with 1D solid-state diffusion)
- R-RQ-TL1Dsolid
- weight_func
Weight function
- modulus
- unity
- proportional
'''
if circuit == 'C':
re_fit = elem_C_fit(params, w).real
im_fit = -elem_C_fit(params, w).imag
elif circuit == 'Q':
re_fit = elem_Q_fit(params, w).real
im_fit = -elem_Q_fit(params, w).imag
elif circuit == 'R-C':
re_fit = cir_RsC_fit(params, w).real
im_fit = -cir_RsC_fit(params, w).imag
elif circuit == 'R-Q':
re_fit = cir_RsQ_fit(params, w).real
im_fit = -cir_RsQ_fit(params, w).imag
elif circuit == 'RC':
re_fit = cir_RC_fit(params, w).real
im_fit = -cir_RC_fit(params, w).imag
elif circuit == 'RQ':
re_fit = cir_RQ_fit(params, w).real
im_fit = -cir_RQ_fit(params, w).imag
elif circuit == 'R-RQ':
re_fit = cir_RsRQ_fit(params, w).real
im_fit = -cir_RsRQ_fit(params, w).imag
elif circuit == 'R-RQ-RQ':
re_fit = cir_RsRQRQ_fit(params, w).real
im_fit = -cir_RsRQRQ_fit(params, w).imag
elif circuit == 'R-RC-C':
re_fit = cir_RsRCC_fit(params, w).real
im_fit = -cir_RsRCC_fit(params, w).imag
elif circuit == 'R-RC-Q':
re_fit = cir_RsRCQ_fit(params, w).real
im_fit = -cir_RsRCQ_fit(params, w).imag
elif circuit == 'R-RQ-Q':
re_fit = cir_RsRQQ_fit(params, w).real
im_fit = -cir_RsRQQ_fit(params, w).imag
elif circuit == 'R-RQ-C':
re_fit = cir_RsRQC_fit(params, w).real
im_fit = -cir_RsRQC_fit(params, w).imag
elif circuit == 'R-(Q(RW))':
re_fit = cir_Randles_simplified_Fit(params, w).real
im_fit = -cir_Randles_simplified_Fit(params, w).imag
elif circuit == 'R-(Q(RM))':
re_fit = cir_Randles_uelectrode_fit(params, w).real
im_fit = -cir_Randles_uelectrode_fit(params, w).imag
elif circuit == 'C-RC-C':
re_fit = cir_C_RC_C_fit(params, w).real
im_fit = -cir_C_RC_C_fit(params, w).imag
elif circuit == 'Q-RQ-Q':
re_fit = cir_Q_RQ_Q_Fit(params, w).real
im_fit = -cir_Q_RQ_Q_Fit(params, w).imag
elif circuit == 'RC-RC-ZD':
re_fit = cir_RCRCZD_fit(params, w).real
im_fit = -cir_RCRCZD_fit(params, w).imag
elif circuit == 'R-TLsQ':
re_fit = cir_RsTLsQ_fit(params, w).real
im_fit = -cir_RsTLsQ_fit(params, w).imag
elif circuit == 'R-RQ-TLsQ':
re_fit = cir_RsRQTLsQ_Fit(params, w).real
im_fit = -cir_RsRQTLsQ_Fit(params, w).imag
elif circuit == 'R-TLs':
re_fit = cir_RsTLs_Fit(params, w).real
im_fit = -cir_RsTLs_Fit(params, w).imag
elif circuit == 'R-RQ-TLs':
re_fit = cir_RsRQTLs_Fit(params, w).real
im_fit = -cir_RsRQTLs_Fit(params, w).imag
elif circuit == 'R-TLQ':
re_fit = cir_RsTLQ_fit(params, w).real
im_fit = -cir_RsTLQ_fit(params, w).imag
elif circuit == 'R-RQ-TLQ':
re_fit = cir_RsRQTLQ_fit(params, w).real
im_fit = -cir_RsRQTLQ_fit(params, w).imag
elif circuit == 'R-TL':
re_fit = cir_RsTL_Fit(params, w).real
im_fit = -cir_RsTL_Fit(params, w).imag
elif circuit == 'R-RQ-TL':
re_fit = cir_RsRQTL_fit(params, w).real
im_fit = -cir_RsRQTL_fit(params, w).imag
elif circuit == 'R-TL1Dsolid':
re_fit = cir_RsTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsTL_1Dsolid_fit(params, w).imag
elif circuit == 'R-RQ-TL1Dsolid':
re_fit = cir_RsRQTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsRQTL_1Dsolid_fit(params, w).imag
else:
print('Circuit is not defined in leastsq_errorfunc()')
error = [(re-re_fit)**2, (im-im_fit)**2] #sum of squares
#Different weighting options, see Lasia
if weight_func == 'modulus':
weight = [1/((re_fit**2 + im_fit**2)**(1/2)), 1/((re_fit**2 + im_fit**2)**(1/2))]
elif weight_func == 'proportional':
weight = [1/(re_fit**2), 1/(im_fit**2)]
elif weight_func == 'unity':
unity_1s = []
for k in range(len(re)):
unity_1s.append(1) #makes an array of 1's, so that the weighting is == 1 * sum of squares.
weight = [unity_1s, unity_1s]
else:
print('weight not defined in leastsq_errorfunc()')
S = np.array(weight) * error #weighted sum of squares
return S
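# Illustrative sketch (not part of the library): leastsq_errorfunc() is written to be passed to
# lmfit's minimize(), in the same way the Lin_KK routine below calls minimize() for KK_errorfunc.
# Here w is an angular-frequency array and Z the measured complex impedance; the parameter names and
# values are hypothetical and must match the chosen circuit string (here 'R-RQ').
#
# from lmfit import Parameters, minimize
# params = Parameters()
# params.add('Rs', value=20, min=0)
# params.add('R', value=100, min=0)
# params.add('n', value=0.9, min=0, max=1)
# params.add('fs', value=1000, min=0)
# fit = minimize(leastsq_errorfunc, params, method='leastsq',
#                args=(w, Z.real, -Z.imag, 'R-RQ', 'modulus'))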
### Fitting Class
class EIS_exp:
'''
This class is used to plot and/or analyze experimental impedance data. The class has three major functions:
- EIS_plot()
- Lin_KK()
- EIS_fit()
- EIS_plot() is used to plot experimental data with or without fit
- Lin_KK() performs a linear Kramers-Kronig analysis of the experimental data set.
- EIS_fit() performs complex non-linear least-squares fitting of the experimental data to an equivalent circuit
<NAME> (<EMAIL> || <EMAIL>)
Inputs
-----------
- path: path of datafile(s) as a string
- data: datafile(s) including extension, e.g. ['EIS_data1.mpt', 'EIS_data2.mpt']
- cycle: Specific cycle numbers can be extracted with this parameter. Default is 'off', which includes all cycle numbers.
To extract specific cycles, insert the cycle numbers in brackets, e.g. cycle=[1,4,6] if cycles 1, 4, and 6 are wanted.
- mask: ['high frequency', 'low frequency'], if only a high- or low-frequency mask is desired, use 'none' for the other, e.g. mask=[10**4,'none']
'''
def __init__(self, path, data, cycle='off', mask=['none','none']):
self.df_raw0 = []
self.cycleno = []
for j in range(len(data)):
if data[j].find(".mpt") != -1: #file is a .mpt file
self.df_raw0.append(extract_mpt(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".DTA") != -1: #file is a .dta file
self.df_raw0.append(extract_dta(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".z") != -1: #file is a .z file
self.df_raw0.append(extract_solar(path=path, EIS_name=data[j])) #reads all datafiles
else:
print('Data file(s) could not be identified')
self.cycleno.append(self.df_raw0[j].cycle_number)
if np.min(self.cycleno[j]) <= np.max(self.cycleno[j-1]):
if j > 0: #corrects cycle_number except for the first data file
self.df_raw0[j].update({'cycle_number': self.cycleno[j]+np.max(self.cycleno[j-1])}) #corrects cycle number
# else:
# print('__init__ Error (#1)')
#currently need to append a cycle_number column to gamry files
# adds individual dataframes into one
if len(self.df_raw0) <= 15:
self.df_raw = pd.concat(self.df_raw0, axis=0)
else:
print("Too many data files || 15 allowed")
self.df_raw = self.df_raw.assign(w = 2*np.pi*self.df_raw.f) #creates a new column with the angular frequency
#Masking data to each cycle
self.df_pre = []
self.df_limited = []
self.df_limited2 = []
self.df = []
if mask == ['none','none'] and cycle == 'off':
for i in range(len(self.df_raw.cycle_number.unique())): #includes all data
self.df.append(self.df_raw[self.df_raw.cycle_number == self.df_raw.cycle_number.unique()[i]])
elif mask == ['none','none'] and cycle != 'off':
for i in range(len(cycle)):
self.df.append(self.df_raw[self.df_raw.cycle_number == cycle[i]]) #extracting dataframe for each cycle
elif mask[0] != 'none' and mask[1] == 'none' and cycle == 'off':
self.df_pre = self.df_raw.mask(self.df_raw.f > mask[0])
self.df_pre.dropna(how='all', inplace=True)
for i in range(len(self.df_pre.cycle_number.unique())): #Appending data based on cycle number
self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])
elif mask[0] != 'none' and mask[1] == 'none' and cycle != 'off': # or [i for i, e in enumerate(mask) if e == 'none'] == [0]
self.df_limited = self.df_raw.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])
elif mask[0] == 'none' and mask[1] != 'none' and cycle == 'off':
self.df_pre = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_pre.dropna(how='all', inplace=True)
for i in range(len(self.df_raw.cycle_number.unique())): #includes all data
self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])
elif mask[0] == 'none' and mask[1] != 'none' and cycle != 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])
elif mask[0] != 'none' and mask[1] != 'none' and cycle != 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited2.cycle_number == cycle[i]])
elif mask[0] != 'none' and mask[1] != 'none' and cycle == 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(self.df_raw.cycle_number.unique())):
self.df.append(self.df_limited[self.df_limited2.cycle_number == self.df_raw.cycle_number.unique()[i]])
else:
print('__init__ error (#2)')
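# Illustrative sketch (not part of the library): a hypothetical instantiation of EIS_exp.
# The path and file name below are placeholders; masking and cycle selection are optional.
#
# ex = EIS_exp(path='/path/to/data/', data=['EIS_data1.mpt'], cycle='off', mask=['none','none'])
# ex.Lin_KK(num_RC='auto', plot='residuals') #Kramers-Kronig validity check of the loaded spectra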
def Lin_KK(self, num_RC='auto', legend='on', plot='residuals', bode='off', nyq_xlim='none', nyq_ylim='none', weight_func='Boukamp', savefig='none'):
'''
Plots the Linear Kramers-Kronig (KK) Validity Test
The script is based on Boukamp and Schönleber et al.'s papers for fitting the resistances of multiple -(RC)- circuits
to the data. A data quality analysis can hereby be made on the basis of the relative residuals
Ref.:
- Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
- Boukamp, B.A. J. Electrochem. Soc., 142, 6, 1885-1894
The function performs the KK analysis and by default plots the relative residuals in each subplot.
Note that weight_func should be equal to 'Boukamp'.
<NAME> (<EMAIL> || <EMAIL>)
Optional Inputs
-----------------
- num_RC:
- 'auto' applies an automatic algorithm developed by Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
that ensures no under- or over-fitting occurs
- can be hardwired by inserting any number (RC-elements/decade)
- plot:
- 'residuals' = plots the relative residuals in subplots corresponding to the cycle numbers picked
- 'w_data' = plots the relative residuals with the experimental data, in Nyquist and bode plot if desired, see 'bode =' in description
- nyq_xlim/nyq_ylim: Change the x/y-axis limits of the Nyquist plot; if not equal to 'none', state the [min,max] values
- legend:
- 'on' = displays cycle number
- 'potential' = displays the average potential at which the spectra were measured
- 'off' = off
bode = Plots Bode Plot - options:
'on' = re, im vs. log(freq)
'log' = log(re, im) vs. log(freq)
're' = re vs. log(freq)
'log_re' = log(re) vs. log(freq)
'im' = im vs. log(freq)
'log_im' = log(im) vs. log(freq)
'''
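# Note on the 'auto' mode below: the number of -(RC)- elements is increased until the
# under/over-fitting measure u = 1 - |sum of negative R_k| / |sum of positive R_k|
# (Schönleber et al., Electrochimica Acta 131 (2014) 20-27) falls inside the 0.75-0.88
# window enforced by the while-loop, which is the window hard-coded in this implementation.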
if num_RC == 'auto':
print('cycle || No. RC-elements || u')
self.decade = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
self.number_RC = []
self.number_RC_sort = []
self.KK_u = []
self.KK_Rgreater = []
self.KK_Rminor = []
M = 2
for i in range(len(self.df)):
self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f))) #determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC.append(M)
self.number_RC_sort.append(M) #needed for self.KK_R
self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0]) #Creates initial guesses for R's
self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i]))) #Creates time constants values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC[i], weight_func, self.t_const[i]) )) #maxfev=99
self.R_names.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1]) #creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)
self.number_RC_sort.insert(0,0) #needed for self.KK_R
for i in range(len(self.df)):
self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC_sort)[i]):int(np.cumsum(self.number_RC_sort)[i+1])]) #assigns resistances from each spectra to their respective df
self.KK_Rgreater.append(np.where(np.array(self.KK_R)[i] >= 0, np.array(self.KK_R)[i], 0) )
self.KK_Rminor.append(np.where(np.array(self.KK_R)[i] < 0, np.array(self.KK_R)[i], 0) )
self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i]))))
for i in range(len(self.df)):
while self.KK_u[i] <= 0.75 or self.KK_u[i] >= 0.88:
self.number_RC_sort0 = []
self.KK_R_lim = []
self.number_RC[i] = self.number_RC[i] + 1
self.number_RC_sort0.append(self.number_RC)
self.number_RC_sort = np.insert(self.number_RC_sort0, 0,0)
self.Rparam[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0] #Creates initial guesses for R's
self.t_const[i] = KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i])) #Creates time constants values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit[i] = minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC[i], weight_func, self.t_const[i]) ) #maxfev=99
self.R_names[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1] #creates R names
self.KK_R0 = np.delete(np.array(self.KK_R0), np.s_[0:len(self.KK_R0)])
self.KK_R0 = []
for q in range(len(self.df)):
for j in range(len(self.R_names[q])):
self.KK_R0.append(self.Lin_KK_Fit[q].params.get(self.R_names[q][j]).value)
self.KK_R_lim = np.cumsum(self.number_RC_sort) #used for KK_R[i]
self.KK_R[i] = self.KK_R0[self.KK_R_lim[i]:self.KK_R_lim[i+1]] #assigns resistances from each spectra to their respective df
self.KK_Rgreater[i] = np.where(np.array(self.KK_R[i]) >= 0, np.array(self.KK_R[i]), 0)
self.KK_Rminor[i] = np.where(np.array(self.KK_R[i]) < 0, np.array(self.KK_R[i]), 0)
self.KK_u[i] = 1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))
else:
print('['+str(i+1)+']'+' '+str(self.number_RC[i]),' '+str(np.round(self.KK_u[i],2)))
elif num_RC != 'auto': #hardwired number of RC-elements/decade
print('cycle || u')
self.decade = []
self.number_RC0 = []
self.number_RC = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
for i in range(len(self.df)):
self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f))) #determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC0.append(np.round(num_RC * self.decade[i]))
                self.number_RC.append(np.round(num_RC * self.decade[i])) #Creates the number of -(RC)- circuits
                self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC0[i]))[0]) #Creates initial guesses for R's
self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC0[i]))) #Creates time constants values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC0[i], weight_func, self.t_const[i]) )) #maxfev=99
self.R_names.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC0[i]))[1]) #creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)
self.number_RC0.insert(0,0)
# print(report_fit(self.Lin_KK_Fit[i])) # prints fitting report
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
self.KK_Rgreater = []
self.KK_Rminor = []
self.KK_u = []
for i in range(len(self.df)):
self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC0)[i]):int(np.cumsum(self.number_RC0)[i+1])]) #assigns resistances from each spectra to their respective df
self.KK_Rx = np.array(self.KK_R)
self.KK_Rgreater.append(np.where(self.KK_Rx[i] >= 0, self.KK_Rx[i], 0) )
self.KK_Rminor.append(np.where(self.KK_Rx[i] < 0, self.KK_Rx[i], 0) )
self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))) #currently gives incorrect values
print('['+str(i+1)+']'+' '+str(np.round(self.KK_u[i],2)))
else:
print('num_RC incorrectly defined')
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
        for i in range(len(self.df)):
            # Dispatch to the simulation function matching the number of -(RC)- elements (KK_RC2 ... KK_RC80)
            # by looking the name up in the module namespace, instead of one elif branch per value.
            KK_RC_fn = globals().get('KK_RC' + str(int(self.number_RC[i])))
            if KK_RC_fn is not None and 2 <= int(self.number_RC[i]) <= 80:
                self.KK_circuit_fit.append(KK_RC_fn(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
            else:
                print('RC simulation circuit not defined')
                print(' Number of RC = ', self.number_RC)
self.KK_rr_re.append(residual_real(re=self.df[i].re, fit_re=self.KK_circuit_fit[i].to_numpy().real, fit_im=-self.KK_circuit_fit[i].to_numpy().imag)) #relative residuals for the real part
self.KK_rr_im.append(residual_imag(im=self.df[i].im, fit_re=self.KK_circuit_fit[i].to_numpy().real, fit_im=-self.KK_circuit_fit[i].to_numpy().imag)) #relative residuals for the imag part
### Plotting Linear_kk results
##
#
### Label functions
self.label_re_1 = []
self.label_im_1 = []
self.label_cycleno = []
if legend == 'on':
for i in range(len(self.df)):
self.label_re_1.append("Z' (#"+str(i+1)+")")
self.label_im_1.append("Z'' (#"+str(i+1)+")")
self.label_cycleno.append('#'+str(i+1))
elif legend == 'potential':
for i in range(len(self.df)):
self.label_re_1.append("Z' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_im_1.append("Z'' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_cycleno.append(str(np.round(np.average(self.df[i].E_avg), 2))+' V')
if plot == 'w_data':
fig = figure(figsize=(6, 8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(311, aspect='equal')
ax1 = fig.add_subplot(312)
ax2 = fig.add_subplot(313)
colors = sns.color_palette("colorblind", n_colors=len(self.df))
colors_real = sns.color_palette("Blues", n_colors=len(self.df)+2)
colors_imag = sns.color_palette("Oranges", n_colors=len(self.df)+2)
### Nyquist Plot
for i in range(len(self.df)):
ax.plot(self.df[i].re, self.df[i].im, marker='o', ms=4, lw=2, color=colors[i], ls='-', alpha=.7, label=self.label_cycleno[i])
### Bode Plot
if bode == 'on':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z', -Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 're':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_re':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("-Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(-Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z', -Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
### Kramers-Kronig Relative Residuals
for i in range(len(self.df)):
ax2.plot(np.log10(self.df[i].f), self.KK_rr_re[i]*100, color=colors_real[i+1], marker='D', ls='--', ms=6, alpha=.7, label=self.label_re_1[i])
ax2.plot(np.log10(self.df[i].f), self.KK_rr_im[i]*100, color=colors_imag[i+1], marker='s', ls='--', ms=6, alpha=.7, label=self.label_im_1[i])
ax2.set_xlabel("log(f) [Hz]")
ax2.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
            ### Setting y-limits and writing the Lin-KK label on the RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if np.min(self.KK_rr_im_min) > np.min(self.KK_rr_re_min):
ax2.set_ylim(np.min(self.KK_rr_re_min)*100*1.5, np.max(np.abs(self.KK_rr_re_min))*100*1.5)
ax2.annotate('Lin-KK', xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_re_max)*100*.9], color='k', fontweight='bold')
elif np.min(self.KK_rr_im_min) < np.min(self.KK_rr_re_min):
ax2.set_ylim(np.min(self.KK_rr_im_min)*100*1.5, np.max(self.KK_rr_im_max)*100*1.5)
ax2.annotate('Lin-KK', xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_im_max)*100*.9], color='k', fontweight='bold')
### Figure specifics
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.set_xlabel("Z' [$\Omega$]")
ax.set_ylabel("-Z'' [$\Omega$]")
if nyq_xlim != 'none':
ax.set_xlim(nyq_xlim[0], nyq_xlim[1])
if nyq_ylim != 'none':
ax.set_ylim(nyq_ylim[0], nyq_ylim[1])
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### Illustrating residuals only
elif plot == 'residuals':
colors = sns.color_palette("colorblind", n_colors=9)
colors_real = sns.color_palette("Blues", n_colors=9)
colors_imag = sns.color_palette("Oranges", n_colors=9)
### 1 Cycle
if len(self.df) == 1:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax = fig.add_subplot(231)
ax.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax.set_xlabel("log(f) [Hz]")
ax.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.axhline(0, ls='--', c='k', alpha=.5)
                ### Setting y-limits and writing the Lin-KK label on the RR subplot
self.KK_rr_im_min = np.min(self.KK_rr_im)
self.KK_rr_im_max = np.max(self.KK_rr_im)
self.KK_rr_re_min = np.min(self.KK_rr_re)
self.KK_rr_re_max = np.max(self.KK_rr_re)
if self.KK_rr_re_max > self.KK_rr_im_max:
self.KK_ymax = self.KK_rr_re_max
else:
self.KK_ymax = self.KK_rr_im_max
if self.KK_rr_re_min < self.KK_rr_im_min:
self.KK_ymin = self.KK_rr_re_min
else:
self.KK_ymin = self.KK_rr_im_min
if np.abs(self.KK_ymin) > self.KK_ymax:
ax.set_ylim(self.KK_ymin*100*1.5, np.abs(self.KK_ymin)*100*1.5)
if legend == 'on':
ax.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin) < self.KK_ymax:
ax.set_ylim(np.negative(self.KK_ymax)*100*1.5, np.abs(self.KK_ymax)*100*1.5)
if legend == 'on':
ax.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 2 Cycles
elif len(self.df) == 2:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
#cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
                ### Setting y-limits and labeling the RR subplots with the Lin-KK label
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 3 Cycles
elif len(self.df) == 3:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
                ### Setting y-limits and labeling the RR subplots with the Lin-KK label
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 4 Cycles
elif len(self.df) == 4:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
ax3.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
                ### Setting y-limits and labeling the RR subplots with the Lin-KK label
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 5 Cycles
elif len(self.df) == 5:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
ax4.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
                ### Setting y-limits and labeling the RR subplots with the Lin-KK label
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
                    ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
# -*- coding: utf-8 -*-
"""
@Author : <NAME>
@Time : 2021/3/24 9:28 AM
@FileName: main.py
@desc: Main program entry file
"""
import os
import random
from collections import Counter
import unicodedata
import cv2
from Levenshtein import ratio
from PIL import Image
from numpy import average, dot, linalg
import numpy as np
import config
from backend.tools.infer import utility
from backend.tools.infer.predict_det import TextDetector
from backend.tools.infer.predict_system import TextSystem
from config import SubtitleArea
# Load the text detection + recognition models
class OcrRecogniser:
def __init__(self):
        # Get the parameter object
self.args = utility.parse_args()
self.recogniser = self.init_model()
def predict(self, image):
detection_box, recognise_result = self.recogniser(image)
return detection_box, recognise_result
def init_model(self):
self.args.use_gpu = config.USE_GPU
if config.USE_GPU:
            # Set the text detection model path
            self.args.det_model_dir = config.DET_MODEL_PATH
            # Set the text recognition model path
            self.args.rec_model_dir = config.REC_MODEL_PATH
        else:
            # Load the fast detection model
            self.args.det_model_dir = config.DET_MODEL_FAST_PATH
            # Load the fast recognition model
            self.args.rec_model_dir = config.REC_MODEL_FAST_PATH
        # Set the character dictionary path
        self.args.rec_char_dict_path = config.DICT_PATH
        # Set the type of text to recognise
self.args.rec_char_type = config.REC_CHAR_TYPE
return TextSystem(self.args)
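# A minimal usage sketch (illustrative):
#   recogniser = OcrRecogniser()
#   boxes, results = recogniser.predict(cv2.imread('frame.jpg'))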
class SubtitleDetect:
def __init__(self):
        # Get the parameter object
args = utility.parse_args()
args.det_algorithm = 'DB'
args.det_model_dir = config.DET_MODEL_FAST_PATH
self.text_detector = TextDetector(args)
def detect_subtitle(self, img):
dt_boxes, elapse = self.text_detector(img)
return dt_boxes, elapse
class SubtitleExtractor:
"""
视频字幕提取类
"""
def __init__(self, vd_path, sub_area=None):
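        # sub_area, when given, is expected to be (ymin, ymax, xmin, xmax) in pixels,
        # matching how it is unpacked in extract_frame_by_det() and extract_subtitles()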
        # Position of the subtitle area
        self.sub_area = sub_area
        self.sub_detector = SubtitleDetect()
        # Video path
        self.video_path = vd_path
        self.video_cap = cv2.VideoCapture(vd_path)
        # Total number of video frames
        self.frame_count = self.video_cap.get(cv2.CAP_PROP_FRAME_COUNT)
        # Video frame rate
        self.fps = self.video_cap.get(cv2.CAP_PROP_FPS)
        # Video dimensions
        self.frame_height = int(self.video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.frame_width = int(self.video_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        # Area where subtitles appear
        self.subtitle_area = config.SUBTITLE_AREA
        print(f'Frame count: {self.frame_count}, frame rate: {self.fps}')
        # Temporary storage folder
        self.temp_output_dir = os.path.join(config.BASE_DIR, 'output')
        # Directory for storing extracted video frames
        self.frame_output_dir = os.path.join(self.temp_output_dir, 'frames')
        # Directory for storing extracted subtitle files
        self.subtitle_output_dir = os.path.join(self.temp_output_dir, 'subtitle')
        # Create the folders if they do not exist
        if not os.path.exists(self.frame_output_dir):
            os.makedirs(self.frame_output_dir)
        if not os.path.exists(self.subtitle_output_dir):
            os.makedirs(self.subtitle_output_dir)
        # Path for storing the extracted raw subtitle text
        self.raw_subtitle_path = os.path.join(self.subtitle_output_dir, 'raw.txt')
def run(self):
"""
运行整个提取视频的步骤
"""
print('【处理中】开启提取视频关键帧...')
if self.sub_area is not None and config.ACCURATE_MODE_ON:
self.extract_frame_by_det()
else:
self.extract_frame_by_fps()
print('【结束】提取视频关键帧完毕...')
print('【处理中】开始提取字幕信息,此步骤可能花费较长时间,请耐心等待...')
self.extract_subtitles()
print('【结束】完成字幕提取,生成原始字幕文件...')
if self.sub_area is None:
print('【处理中】开始检测并过滤水印区域内容')
# 询问用户视频是否有水印区域
user_input = input('视频是否存在水印区域,存在的话输入y,不存在的话输入n: ').strip()
if user_input == 'y':
self.filter_watermark()
print('【结束】已经成功过滤水印区域内容')
else:
print('-----------------------------')
if self.sub_area is None:
print('【处理中】开始检测非字幕区域,并将非字幕区域的内容删除')
self.filter_scene_text()
print('【结束】已将非字幕区域的内容删除')
print('【处理中】开始生成字幕文件')
self.generate_subtitle_file()
print('【结束】字幕文件生成成功')
def extract_frame(self):
"""
根据视频的分辨率,将高分辨的视频帧缩放到1280*720p
根据字幕区域位置,将该图像区域截取出来
"""
# 删除缓存
self.__delete_frame_cache()
# 当前视频帧的帧号
frame_no = 0
while self.video_cap.isOpened():
ret, frame = self.video_cap.read()
# 如果读取视频帧失败(视频读到最后一帧)
if not ret:
break
# 读取视频帧成功
else:
frame_no += 1
frame = self._frame_preprocess(frame)
# 帧名往前补零,后续用于排序与时间戳转换,补足8位
# 一部10h电影,fps120帧最多也才1*60*60*120=432000 6位,所以8位足够
filename = os.path.join(self.frame_output_dir, str(frame_no).zfill(8) + '.jpg')
# 保存视频帧
cv2.imwrite(filename, frame)
# 将当前帧与接下来的帧进行比较,计算余弦相似度
compare_times = 0
while self.video_cap.isOpened():
ret, frame_next = self.video_cap.read()
if ret:
frame_no += 1
frame_next = self._frame_preprocess(frame_next)
cosine_distance = self._compute_image_similarity(Image.fromarray(frame),
Image.fromarray(frame_next))
compare_times += 1
if compare_times == config.FRAME_COMPARE_TIMES:
break
if cosine_distance > config.COSINE_SIMILARITY_THRESHOLD:
                            # If the next frame's similarity to the current frame exceeds the threshold, skip that frame
                            continue
                        # If the similarity is below the threshold, stop this while loop
else:
break
else:
break
self.video_cap.release()
def extract_frame_by_fps(self):
"""
根据帧率,定时提取视频帧,容易丢字幕,但速度快
"""
# 删除缓存
self.__delete_frame_cache()
# 当前视频帧的帧号
frame_no = 0
while self.video_cap.isOpened():
ret, frame = self.video_cap.read()
# 如果读取视频帧失败(视频读到最后一帧)
if not ret:
break
# 读取视频帧成功
else:
frame_no += 1
frame = self._frame_preprocess(frame)
# 帧名往前补零,后续用于排序与时间戳转换,补足8位
# 一部10h电影,fps120帧最多也才1*60*60*120=432000 6位,所以8位足够
filename = os.path.join(self.frame_output_dir, str(frame_no).zfill(8) + '.jpg')
# 保存视频帧
cv2.imwrite(filename, frame)
# 跳过剩下的帧
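                # e.g. with fps = 25 and config.EXTRACT_FREQUENCY = 2 (illustrative values),
                # 25 // 2 - 1 = 11 extra frames are read and discarded, so roughly 2 frames per second are kept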
for i in range(int(self.fps // config.EXTRACT_FREQUENCY) - 1):
ret, _ = self.video_cap.read()
if ret:
frame_no += 1
self.video_cap.release()
def extract_frame_by_det(self):
"""
通过检测字幕区域位置提取字幕帧
"""
# 删除缓存
self.__delete_frame_cache()
# 当前视频帧的帧号
frame_no = 0
while self.video_cap.isOpened():
ret, frame = self.video_cap.read()
# 如果读取视频帧失败(视频读到最后一帧)
if not ret:
break
# 读取视频帧成功
else:
frame_no += 1
if self.sub_area is not None:
ymin, ymax, xmin, xmax = self.sub_area
dt_boxes, elapse = self.sub_detector.detect_subtitle(frame[ymin:ymax, xmin:xmax])
if len(dt_boxes) > 0:
                        # Save the video frame
                        frame = self._frame_preprocess(frame)
                        # Zero-pad the frame name to 8 digits, used later for sorting and timestamp conversion
                        # A 10 h movie at 120 fps has at most 10*60*60*120 = 4,320,000 frames (7 digits), so 8 digits are enough
                        filename = os.path.join(self.frame_output_dir, str(frame_no).zfill(8) + '.jpg')
                        cv2.imwrite(filename, frame)
                        print(f'Subtitle frame: {frame_no}, elapsed: {elapse}')
self.video_cap.release()
def extract_subtitle_frame(self):
"""
提取包含字幕的视频帧
"""
# 删除缓存
self.__delete_frame_cache()
# 获取字幕帧列表
subtitle_frame_list = self._analyse_subtitle_frame()
if subtitle_frame_list is None:
print('请指定字幕区域')
return
cap = cv2.VideoCapture(self.video_path)
idx = 0
index = 0
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
if idx in subtitle_frame_list and idx != 0:
filename = os.path.join(self.frame_output_dir, str(idx).zfill(8) + '.jpg')
frame = self._frame_preprocess(frame)
cv2.imwrite(filename, frame)
subtitle_frame_list.remove(idx)
index += 1
idx = idx + 1
cap.release()
def extract_subtitles(self):
"""
提取视频帧中的字幕信息,生成一个txt文件
"""
# 初始化文本识别对象
text_recogniser = OcrRecogniser()
# 视频帧列表
frame_list = [i for i in sorted(os.listdir(self.frame_output_dir)) if i.endswith('.jpg')]
# 删除缓存
if os.path.exists(self.raw_subtitle_path):
os.remove(self.raw_subtitle_path)
# 新建文件
f = open(self.raw_subtitle_path, mode='w+', encoding='utf-8')
for frame in frame_list:
# 读取视频帧
img = cv2.imread(os.path.join(self.frame_output_dir, frame))
# 获取检测结果
dt_box, rec_res = text_recogniser.predict(img)
# 获取文本坐标
coordinates = self.__get_coordinates(dt_box)
# 将结果写入txt文本中
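            # Each line written to raw.txt is tab-separated:
            #   <zero-padded frame number>\t(xmin, xmax, ymin, ymax)\t<recognised text>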
text_res = [(res[0], res[1]) for res in rec_res]
for content, coordinate in zip(text_res, coordinates):
if self.sub_area is not None:
s_ymin = self.sub_area[0]
s_ymax = self.sub_area[1]
s_xmin = self.sub_area[2]
s_xmax = self.sub_area[3]
xmin = coordinate[0]
xmax = coordinate[1]
ymin = coordinate[2]
ymax = coordinate[3]
if s_xmin <= xmin and xmax <= s_xmax and s_ymin <= ymin and ymax <= s_ymax:
print(content[0], content[1])
if content[1] > config.DROP_SCORE:
f.write(f'{os.path.splitext(frame)[0]}\t'
f'{coordinate}\t'
f'{content[0]}\n')
else:
f.write(f'{os.path.splitext(frame)[0]}\t'
f'{coordinate}\t'
f'{content[0]}\n')
        # Close the file
f.close()
def filter_watermark(self):
"""
去除原始字幕文本中的水印区域的文本
"""
# 获取潜在水印区域
watermark_areas = self._detect_watermark_area()
# 从frame目录随机读取一张图片,将所水印区域标记出来,用户看图判断是否是水印区域
frame_path = os.path.join(self.frame_output_dir,
random.choice(
[i for i in sorted(os.listdir(self.frame_output_dir)) if i.endswith('.jpg')]))
sample_frame = cv2.imread(frame_path)
        # Number the potential watermark areas
area_num = ['E', 'D', 'C', 'B', 'A']
for watermark_area in watermark_areas:
ymin = min(watermark_area[0][2], watermark_area[0][3])
ymax = max(watermark_area[0][3], watermark_area[0][2])
xmin = min(watermark_area[0][0], watermark_area[0][1])
xmax = max(watermark_area[0][1], watermark_area[0][0])
cover = sample_frame[ymin:ymax, xmin:xmax]
cover = cv2.blur(cover, (10, 10))
cv2.rectangle(cover, pt1=(0, cover.shape[0]), pt2=(cover.shape[1], 0), color=(0, 0, 255), thickness=3)
sample_frame[ymin:ymax, xmin:xmax] = cover
position = ((xmin + xmax) // 2, ymax)
cv2.putText(sample_frame, text=area_num.pop(), org=position, fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1, color=(255, 0, 0), thickness=2, lineType=cv2.LINE_AA)
sample_frame_file_path = os.path.join(os.path.dirname(self.frame_output_dir), 'watermark_area.jpg')
cv2.imwrite(sample_frame_file_path, sample_frame)
        print(f'Please check the image to confirm the watermark areas: {sample_frame_file_path}')
area_num = ['E', 'D', 'C', 'B', 'A']
for watermark_area in watermark_areas:
            user_input = input(f'Remove the subtitles in area {area_num.pop()}{str(watermark_area)}?'
                               f'\nEnter "y" or press Enter to remove them, enter "n" or anything else to keep them: ').strip()
            if user_input == 'y' or user_input == '':
with open(self.raw_subtitle_path, mode='r+', encoding='utf-8') as f:
content = f.readlines()
f.seek(0)
for i in content:
if i.find(str(watermark_area[0])) == -1:
f.write(i)
f.truncate()
                print(f'Subtitles in this area have been removed...')
        print('Watermark-area subtitle filtering finished...')
        # Delete cache
if os.path.exists(sample_frame_file_path):
os.remove(sample_frame_file_path)
def filter_scene_text(self):
"""
将场景里提取的文字过滤,仅保留字幕区域
"""
# 获取潜在字幕区域
subtitle_area = self._detect_subtitle_area()[0][0]
# 从frame目录随机读取一张图片,将所水印区域标记出来,用户看图判断是否是水印区域
frame_path = os.path.join(self.frame_output_dir,
random.choice(
[i for i in sorted(os.listdir(self.frame_output_dir)) if i.endswith('.jpg')]))
sample_frame = cv2.imread(frame_path)
        # To allow for double-line subtitles, enlarge the y-range of the subtitle area by the configured tolerance
ymin = abs(subtitle_area[0] - config.SUBTITLE_AREA_DEVIATION_PIXEL)
ymax = subtitle_area[1] + config.SUBTITLE_AREA_DEVIATION_PIXEL
        # Draw the subtitle box area
cv2.rectangle(sample_frame, pt1=(0, ymin), pt2=(sample_frame.shape[1], ymax), color=(0, 0, 255), thickness=3)
sample_frame_file_path = os.path.join(os.path.dirname(self.frame_output_dir), 'subtitle_area.jpg')
cv2.imwrite(sample_frame_file_path, sample_frame)
        print(f'Please check the image to confirm that the subtitle area is correct: {sample_frame_file_path}')
        user_input = input(f'Remove subtitles outside the red box area {(ymin, ymax)}?'
                           f'\nEnter "y" or press Enter to remove them, enter "n" or anything else to keep them: ').strip()
        if user_input == 'y' or user_input == '':
with open(self.raw_subtitle_path, mode='r+', encoding='utf-8') as f:
content = f.readlines()
f.seek(0)
for i in content:
i_ymin = int(i.split('\t')[1].split('(')[1].split(')')[0].split(', ')[2])
i_ymax = int(i.split('\t')[1].split('(')[1].split(')')[0].split(', ')[3])
if ymin <= i_ymin and i_ymax <= ymax:
f.write(i)
f.truncate()
            print('Removal finished')
        # Delete cache
if os.path.exists(sample_frame_file_path):
os.remove(sample_frame_file_path)
def generate_subtitle_file(self):
"""
生成srt格式的字幕文件
"""
subtitle_content = self._remove_duplicate_subtitle()
print(os.path.splitext(self.video_path)[0])
srt_filename = os.path.join(os.path.splitext(self.video_path)[0] + '.srt')
# Keep subtitle lines whose duration is under 1 second for later post-processing
post_process_subtitle = []
with open(srt_filename, mode='w', encoding='utf-8') as f:
for index, content in enumerate(subtitle_content):
line_code = index + 1
frame_start = self._frame_to_timecode(int(content[0]))
# Compare the start and end frame numbers; if the subtitle lasts less than 1 second, set its display time to 1s
if abs(int(content[1]) - int(content[0])) < self.fps:
frame_end = self._frame_to_timecode(int(int(content[0]) + self.fps))
post_process_subtitle.append(line_code)
else:
frame_end = self._frame_to_timecode(int(content[1]))
frame_content = content[2]
subtitle_line = f'{line_code}\n{frame_start} --> {frame_end}\n{frame_content}\n'
f.write(subtitle_line)
print(f'Subtitle file generated at: {srt_filename}')
# Return the subtitle lines whose duration is under 1s
return post_process_subtitle
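# --- Illustrative sketch (added; not part of the original project) ---
# The _frame_to_timecode helper used above is defined elsewhere in the project.
# A minimal frame-index -> SRT timecode conversion based on self.fps might look
# roughly like this hypothetical helper (format 'HH:MM:SS,mmm'):
def _frame_to_timecode_sketch(self, frame_no):
    total_seconds = frame_no / self.fps
    ms = int(round((total_seconds - int(total_seconds)) * 1000))
    h, rem = divmod(int(total_seconds), 3600)
    m, s = divmod(rem, 60)
    return f'{h:02d}:{m:02d}:{s:02d},{ms:03d}'  # e.g. frame 75 at 25 fps -> '00:00:03,000'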
def _analyse_subtitle_frame(self):
"""
Use a simple image-processing heuristic to find the video frames that contain subtitles
: reference https://github.com/BruceHan98/OCR-Extract-Subtitles/blob/main/analyze_key_frame.py
"""
if self.sub_area is None:
return None
else:
subtitle_frame_index_list = []
index = 0
s_ymin = self.sub_area[0]
s_ymax = self.sub_area[1]
s_xmin = self.sub_area[2]
s_xmax = self.sub_area[3]
cap = cv2.VideoCapture(self.video_path)
success, frame = cap.read()
if success:
# Crop the subtitle region
frame = frame[s_ymin:s_ymax, s_xmin:s_xmax]
h, w = frame.shape[0:2]
if config.BG_MOD == config.BackgroundColor.DARK: # dark background
minuend = np.full(h * w, config.BG_VALUE_DARK) # threshold array to subtract
else:
minuend = np.full(h * w, config.BG_VALUE_OTHER) # threshold array to subtract
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
flatten_gray = gray.flatten()
last_roi = flatten_gray - minuend
last_roi = np.where(last_roi > 0, 1, 0)
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
frame = frame[s_ymin:s_ymax, s_xmin:s_xmax]
if index % config.EXTRACT_INTERVAL == 0:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
flatten_gray = gray.flatten()
roi = flatten_gray - minuend
roi =
|
np.where(roi > 0, 1, 0)
|
numpy.where
|
# import libraries
from presets import Preset
import numpy as np
import matplotlib.pyplot as plt
import librosa as lb
import librosa.display
import time
import sounddevice as sd
y, sr = lb.load('Muumit goes war.mp3', sr=None, duration=100)
audioLength = lb.get_duration(y, sr=sr)
'''
By default, librosa resamples audio to a sampling rate of 22050 Hz.
We'd prefer to use the sampling rate of the audio signal we loaded.
Therefore we use the presets library to change the default sampling rate to
that of the audio signal. In the process, we also set the frame length
and frame shift (hop length) to the values presented in the article.
'''
lb = Preset(lb)  # wrap librosa so the preset values below apply to the lb.* calls used in this script
lb['sr'] = sr
lb['n_fft'] = 1024
lb['hop_length'] = 512
F = lb.stft(y) # Step 1: Calculate the STFT of the input signal
gamma = 0.3 # Defines the amount of range compression
# Step 2: Calculate a range-compressed version of Power Spectrogram
W = np.power(np.abs(F), 2 * gamma)
# Step 3: Set the initial values for harmonic and percussive spectrogram
H = 0.5 * W
P = 0.5 * W
# Two more values are required in the algorithm, and they are as follows
kMax = 100 # The amount of iterations in the algorithm
alpha = 0.3 # Balance parameter (experimentally determined)
for k in range(kMax):
# Step 4: Calculate the update variables delta(k)
# The delta(K) is roughly the difference between harmonic
# and percussive parts. Slots for those parts are made beforehand.
partH = np.zeros_like(H) # An array to store the harmonic part
partP = np.zeros_like(P) # An array to store the percussive part
# The harmonic part is calculated with a for loop over the time index i.
for iIter in range(1, np.shape(H)[1] - 1):
partH[:, iIter] = alpha * ((H[:, iIter - 1] -
(2 * H[:, iIter]) +
H[:, iIter + 1]) / 4)
# The percussive part is calculated with a for loop over the frequency index h.
for hIter in range(1, np.shape(H)[0] - 1):
partP[hIter, :] = (1 - alpha) * ((P[hIter - 1, :] -
(2 * P[hIter, :]) +
P[hIter + 1, :]) / 4)
deltak = partH - partP # The calculated update variables
# Step 5: Update H and P as defined in the article
H = np.minimum(np.maximum(H + deltak, 0), W)
P = W - H
# Step 6: Increment k by 1 ( if k != kmax -1)
# If we had used while-loop, there would be k+1 here, but with for-loop
# the incrementation is done correctly automatically (requiring that
# we used the correct length for the for-loop).
# Step 7: Binarize the separation result
H = np.where(np.less(H, P), 0, W)
P = np.where(np.greater_equal(H, P), 0, W)
# Step 8: Convert H and P into waveforms
# H and P are assigned these values in this step because they're needed to
# plot the spectrograms later. Otherwise, we would calculate the waveforms
# directly, without this additional step. We do not care about overwriting
# the existing data in H and P, because it is not needed anymore.
H = np.power(H, (1 / (2 * gamma))) * np.exp(
1j * np.angle(F)) # ISTFT is taken first on this, with H
P = np.power(P, (1 / (2 * gamma))) * np.exp(
1j * np.angle(F)) # ISTFT is taken second on this, with P
# Calculate the actual waveforms with ISTFT. Length is set to len(y)
# so we can subtract the separated waveforms from the original.
h = lb.istft(H, length=len(y))
p = lb.istft(P, length=len(y))
rp = np.max(np.abs(F)) # To scale the colorbar correctly (hopefully)
plt.figure(figsize=(12, 8))
# Plot the original audio's spectrogram.
plt.subplot(3, 1, 1)
lb.display.specshow(lb.amplitude_to_db(np.abs(F), ref=rp), sr=sr,
y_axis='log', x_axis='time')
plt.colorbar()
plt.title('Full spectrogram')
plt.tight_layout()
# Plot the harmonic spectrogram.
plt.subplot(3, 1, 2)
lb.display.specshow(lb.amplitude_to_db(np.abs(H), ref=rp), sr=sr,
y_axis='log', x_axis='time')
plt.colorbar()
plt.title('Harmonic spectrogram')
plt.tight_layout()
# Plot the percussive spectrogram.
plt.subplot(3, 1, 3)
lb.display.specshow(lb.amplitude_to_db(np.abs(P), ref=rp), sr=sr,
y_axis='log', x_axis='time')
plt.colorbar()
plt.title('Percussive spectrogram')
plt.tight_layout()
plt.show()
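# Optional (illustrative only): audition the separated components using the
# sounddevice module imported above as sd; this assumes a working default
# output device. Uncomment to listen:
# sd.play(h, sr)  # harmonic component
# sd.wait()
# sd.play(p, sr)  # percussive component
# sd.wait()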
e = y - p - h # Noise (original signal minus percussive & harmonic components)
# Calculate the signal-to-noise ratio.
SNR = 10*np.log10(np.sum(np.power(y, 2))/np.sum(
|
np.power(e, 2)
|
numpy.power
|
"""
Simple two-level AMR using TF
"""
import numpy as np
np.set_printoptions(edgeitems=30, linewidth=200, formatter=dict(float=lambda x: "%.3g" % x))
import tensorflow.compat.v1 as tf
from my_libs.utils import grid_points, int2digits, digits2int, linear_interp_coeff
import matplotlib.pyplot as plt
from matplotlib import tri as mtri
from matplotlib import collections as mc
from matplotlib.backends.backend_pdf import PdfPages
def my_show():
try:
pdf.savefig()
except:
plt.show()
def plot_amr(mesh, field, edges, colorbar=False):
fig1, ax1 = plt.subplots(figsize=(7,7))
triang = mtri.Triangulation(mesh[:,0], mesh[:,1], None)
ax1.set_aspect(1)
tpc = ax1.tripcolor(triang, field.ravel(), vmin=0, vmax=1)
if colorbar:
fig1.colorbar(tpc)
lines = mesh[edges]
pbc_flag = np.where(np.linalg.norm(lines[:,0]-lines[:,1],axis=1)< (N1+1))
lines = lines[pbc_flag]
ax1.add_collection(mc.LineCollection(lines, colors='r', linewidths=0.7)) #, colors=np.random.rand([len(edges),3]), linewidths=1.1))
my_show()
class amr_state_variables:
# def __init__(self, dim, shape_all, shape0, mask1, mask1_where, field0, field1, shared_interface, shared_edge3d, mask_all, field_all, edge_all, type_all=None, refine_threshold=0.001, periodic=True):
def __init__(self, dim, shape_all, shape0, field_all, type_all=None, refine_threshold=0.001, periodic=True, buffer=0, eval_freq=1):
# """"
# shared_interface: flag of shape [shape0, dim, 2]
# shared_edge3d: [shape0, dim, dim, 2, 2]
# """"
assert periodic, 'Must be periodic'
self.dim = dim
self.shape_all = tuple(shape_all)
self.n_all = np.prod(shape_all)
self.ijk2int = tf.convert_to_tensor([shape_all[1],1] if dim==2 else [shape_all[1]*shape_all[2],shape_all[2],1], dtype=tf.int64)
# edge_all = tf.tensordot(edge_all, ijk2int, 1)
self.shape0 = tuple(shape0)
shape0 = np.array(shape0)
self.n0 = np.prod(shape0)
shape1 = np.array(shape_all) // np.array(shape0)
self.shape1 = tuple(shape1)
self.shape1_plus1 = (-1,) + tuple(
|
np.array(shape1)
|
numpy.array
|
'''
After noticing a substantial improvement of 2.6% from my own manual changes,
validating my expectations, I've decided to automate this process.
'''
import keras
import os
import pickle
import numpy as np
validation_dir = "prepared_validation_data"
USE_WEIGHTS = True
DECAY = 0.9
# w0 s1 a2 d3 wa4 wd5 sa6 sd7 nk8
WEIGHTS = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
#WEIGHTS =[0.030903154382632643, 1000.0, 0.020275559590445278, 0.013302794647291147, 0.0225283995449392, 0.025031555049932444, 1000.0, 1000.0, 0.016423203268260675]
mapping_dict = {0: "W",
1: "S",
2: "A",
3: "D",
4: "WA",
5: "WD",
6: "SA",
7: "SD",
8: "NK",}
close_dict = {
0: {4: 0.3, 5: 0.3, 8: 0.05}, # Should be W, but said WA or WD NK nbd
1: {6: 0.3, 7: 0.3, 8: 0.05}, # Should be S, but said SA OR SD, NK nbd
2: {4: 0.3, 6: 0.3}, # Should be A, but SA or WA
3: {5: 0.3, 7: 0.3}, # Should be D, but SD or WD
4: {2: 0.5}, # Should be WA, but A
5: {3: 0.5}, # Should be WD, but D
6: {1: 0.3, 2: 0.3}, # Should be SA, but S or A
7: {1: 0.3, 3: 0.3}, # Should be SD, but S or D
8: {0: 0.05, 1: 0.05, 2: 0.05, 3: 0.05, 4: 0.05, 5: 0.05, 6: 0.05, 7: 0.05, }, # should be NK... but whatever.
}
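# Illustrative note (added): the loop below uses close_dict for partial credit.
# For example, if the true key is W (class 0) but the model predicts WA (class 4),
# the prediction is not counted as correct, yet it still adds 0.3 to the
# closeness score instead of 0.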
model = keras.models.load_model(f"trained_models/1536269759_xception-v1_p-01.0456_ta-0.73_va-0.38.model")
while True:
dist_dict = {0: 0,
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 0}
total = 0
correct = 0
closeness = 0
# step 1
for f in os.listdir(validation_dir):
if ".pickle" in f:
chunk = pickle.load(open(os.path.join(validation_dir, f), "rb"))
for data in chunk:
total += 1
X = data[1]
X = X/255.0
y = data[0]
prediction = model.predict([X.reshape(-1, X.shape[0], X.shape[1], X.shape[2])])[0]
if USE_WEIGHTS:
prediction = np.array(prediction) * np.array(WEIGHTS)
dist_dict[np.argmax(prediction)] += 1
if np.argmax(prediction) == np.argmax(y):
correct += 1
closeness += 1
else:
if np.argmax(prediction) in close_dict[np.argmax(y)]:
closeness += close_dict[
|
np.argmax(y)
|
numpy.argmax
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Lint as: python3
"""Tests for tensorflow_probability.spinoffs.oryx.distributions.distributions_extensions."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import random
import jax.numpy as np
import numpy as onp
from oryx import core
from oryx import distributions as bd
from oryx.core import ppl
DISTRIBUTIONS = [
('normal_scalar_args', bd.Normal, (0., 1.), {}, 0., [0., 1.]),
('normal_scalar_kwargs', bd.Normal, (), {
'loc': 0.,
'scale': 1.
}, 0., [0., 1.]),
('mvn_diag_args', bd.MultivariateNormalDiag,
(onp.zeros(5, dtype=onp.float32), onp.ones(5, dtype=onp.float32)), {},
onp.zeros(5, dtype=onp.float32),
[onp.zeros(5, dtype=onp.float32),
onp.ones(5, dtype=onp.float32)]),
('mvn_diag_kwargs', bd.MultivariateNormalDiag, (), {
'loc': onp.zeros(5, dtype=onp.float32),
'scale_diag': onp.ones(5, dtype=onp.float32)
}, onp.zeros(5, dtype=onp.float32),
[onp.zeros(5, dtype=onp.float32),
|
onp.ones(5, dtype=onp.float32)
|
numpy.ones
|
"""
Core of likelihood computations
This package implements many likelihoods based on the common chi2 statistics
python/numpy version.
N_logLikelihood Computes a normal likelihood (default, symmetric errors)
SN_logLikelihood Computes a Split Normal likelihood (asymmetric errors)
getNorm_lnP Compute the norm of a log-likelihood (overflow robust)
"""
import numpy as np
__all__ = [
"N_chi2_NM",
"N_covar_chi2",
"N_logLikelihood_NM",
"N_covar_logLikelihood",
"N_covar_logLikelihood_cholesky",
"getNorm_lnP",
]
def N_chi2_NM(flux, fluxmod_wbias, ivar, mask=None):
""" compute the non-reduced chi2 between data and model taking into account
the noise model computed from ASTs.
Parameters
----------
flux: np.ndarray[float, ndim=1]
array of fluxes
fluxmod_wbias: np.ndarray[float, ndim=2]
array of modeled fluxes + ast-derived biases (nfilters , nmodels)
ivar: np.ndarray[float, ndim=2]
array of ast-derived inverse variances (nfilters , nmodels)
mask: np.ndarray[bool, ndim=1]
mask array to apply during the calculations mask.shape = flux.shape
Returns
-------
chi2: np.ndarray[float, ndim=1]
array of chi2 values (nmodels)
"""
if (mask is None) or not np.any(mask):  # no mask, or an all-False mask: use the full arrays
temp = flux - fluxmod_wbias
_ie = ivar
else:
_m = ~mask.astype(bool)
temp = flux[_m] - fluxmod_wbias[:, _m]
_ie = ivar[:, _m]
return np.einsum("ij,ij,ij->i", temp, temp, _ie)
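# Illustrative usage sketch for N_chi2_NM (added; not part of the original module).
def _example_N_chi2_NM():
    """Hypothetical usage sketch.

    Assumes a (nmodels, nfilters) layout for ``fluxmod_wbias`` and ``ivar``,
    consistent with the broadcasting and einsum used above; the numbers are
    purely illustrative.
    """
    nmodels, nfilters = 3, 4
    flux = np.ones(nfilters)                       # observed fluxes
    fluxmod_wbias = np.zeros((nmodels, nfilters))  # model fluxes + AST biases
    ivar = np.ones((nmodels, nfilters))            # AST inverse variances
    chi2 = N_chi2_NM(flux, fluxmod_wbias, ivar)    # shape (nmodels,); each value is 4.0 here
    return chi2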
def N_covar_chi2(flux, fluxmod_wbias, icov_diag, two_icov_offdiag):
""" compute the non-reduced chi2 between data and model using
the full covariance matrix information computed from ASTs.
Parameters
----------
flux: np.ndarray[float, ndim=1]
array of fluxes
fluxmod_wbias: np.ndarray[float, ndim=2]
array of modeled fluxes (nfilters , nmodels)
icov_diag: np.ndarray[float, ndim=2]
array giving the diagonal terms of the covariance matrix inverse
two_icov_offdiag: np.ndarray[float, ndim=2]
array giving 2x the off diagonal terms of the covariance matrix inverse
Returns
-------
chi2: np.ndarray[float, ndim=1]
array of chi2 values (nmodels)
Note
----
Mask removed as it cannot be used with a precomputed inverse
covariance matrix. (KDG 29 Jan 2016)
"""
# get the number of models and filters
n_models, n_filters = fluxmod_wbias.shape
# compute the difference in fluxes
fluxdiff = flux[None, :] - fluxmod_wbias
# diagonal terms
chisqr = np.einsum("ij,ij,ij->i", fluxdiff, fluxdiff, icov_diag)
# off-diagonal terms
m_start = 0
for k in range(n_filters - 1):
m_end = m_start + n_filters - k - 1
tchisqr = np.einsum(
"ij,ij->i", two_icov_offdiag[:, m_start:m_end], fluxdiff[:, k + 1 :]
)
tchisqr *= fluxdiff[:, k]
chisqr += tchisqr
m_start = m_end
return chisqr
def N_logLikelihood_NM(flux, fluxmod_wbias, ivar, mask=None, lnp_threshold=1000.0):
r""" Computes the log of the chi2 likelihood between data and model taking
into account the noise model.
Parameters
----------
flux: np.ndarray[float, ndim=1]
array of fluxes
fluxmod_wbias: np.ndarray[float, ndim=2]
array of modeled fluxes + ast-derived biases (nfilters, nmodels)
ivar: np.ndarray[float, ndim=2]
array of ast-derived inverse variances (nfilters , nmodels)
mask: np.ndarray[bool, ndim=1]
mask array to apply during the calculations mask.shape = flux.shape
lnp_threshold: float
cut the values outside -x, x in lnp
Returns
-------
(lnp, chi2)
lnP: np.ndarray[float, ndim=1]
array of ln(P) values (Nmodels)
chi2: np.ndarray[float, ndim=1]
array of chi-squared values (Nmodels)
.. math::
P = \\frac{1}{\\sqrt{2 \\pi} \\sigma} \\times \\exp ( - \\chi^2 / 2 )
and
\\chi^2 = \\sum_{k} (flux_{obs,k} - flux_{pred,k} - \\mu_k)^2 /
\\sigma^2_{pred,k}
"""
ni, nj = np.shape(fluxmod_wbias)
# compute the quality factor
# lnQ = -0.5 * nj * ln( 2 * pi) - sum_j {ln( err[j] ) }
temp = 0.5 *
|
np.log(2.0 * np.pi)
|
numpy.log
|
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
import random
from collections import defaultdict
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
def load_train_valid_labels(filename, valid_prop):
lbs = dict()
lbs['f2g'] = dict()
lbs['f2g']['train'] = defaultdict(list)
lbs['f2g']['valid'] = defaultdict(list)
lbs['g2f'] = dict()
lbs['g2f']['train'] = defaultdict(list)
lbs['g2f']['valid'] = defaultdict(list)
with open(filename, 'r') as fin:
for ln in fin:
elems = ln.strip().split()
for val in elems[1].split(';'):
if random.random()<valid_prop:
lbs['f2g']['train'][elems[0]].append(val)
lbs['g2f']['train'][val].append(elems[0])
else:
lbs['f2g']['valid'][elems[0]].append(val)
lbs['g2f']['valid'][val].append(elems[0])
return lbs
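# Illustrative note (added): each line of the label file is expected to look like
#   <src_label><whitespace><obj_label_1>;<obj_label_2>;...
# e.g. a hypothetical line "user42 item1;item7" maps user42 -> [item1, item7]
# under 'f2g' and item1 -> [user42], item7 -> [user42] under 'g2f'.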
# def _fetch_obj_labels(lbs, src_lbs, start_idx, end_idx):
# obj_lbs = set()
# for i in range(start_idx, end_idx):
# src_lb = src_lbs[i]
# for obj_lb in lbs[src_lb]:
# obj_lbs.add(obj_lb)
# return list(obj_lbs)
def batch_iter(lbs, batch_size, neg_ratio, lookup_src, lookup_obj, src_lb_tag, obj_lb_tag):
train_lb_src2obj = lbs['{}2{}'.format(src_lb_tag,obj_lb_tag)]['train']
train_lb_obj2src = lbs['{}2{}'.format(obj_lb_tag,src_lb_tag)]['train']
train_size = len(train_lb_src2obj)
start_index = 0
end_index = min(start_index+batch_size, train_size)
src_lb_keys = list(train_lb_src2obj.keys())  # list() so the keys can be indexed under Python 3
obj_lb_keys = list(train_lb_obj2src.keys())
shuffle_indices = np.random.permutation(np.arange(train_size))
while start_index < end_index:
pos_src = list()
pos_obj = list()
neg_src = list()
neg_obj = list()
for i in range(start_index,end_index):
idx = shuffle_indices[i]
src_lb = src_lb_keys[idx]
if not src_lb in lookup_src:
continue
nd_src = lookup_src[src_lb] # idx of embedding in src network
obj_lbs = train_lb_src2obj[src_lb]
for obj_lb in obj_lbs:
cur_neg_src = list()
cur_neg_obj = list()
if not obj_lb in lookup_obj:
continue
nd_obj = lookup_obj[obj_lb]
for k in range(neg_ratio):
rand_nd_obj = None
while not rand_nd_obj or rand_nd_obj in cur_neg_obj or rand_obj_lb in obj_lbs:
rand_obj_lb_idx = random.randint(0, len(obj_lb_keys)-1)
rand_obj_lb = obj_lb_keys[rand_obj_lb_idx]
if not rand_obj_lb in lookup_obj:
# print rand_obj_lb
continue
rand_nd_obj = lookup_obj[rand_obj_lb]
cur_neg_src.append(nd_src)
cur_neg_obj.append(rand_nd_obj)
pos_src.append(nd_src)
pos_obj.append(nd_obj)
neg_src.append(cur_neg_src)
neg_obj.append(cur_neg_obj)
start_index = end_index
end_index = min(start_index+batch_size, train_size)
yield pos_src,pos_obj,neg_src,neg_obj
def valid_iter(lbs, valid_sample_size, lookup_src, lookup_obj, src_lb_tag, obj_lb_tag):
valid_lb_src2obj = lbs['{}2{}'.format(src_lb_tag,obj_lb_tag)]['valid']
valid_lb_obj2src = lbs['{}2{}'.format(obj_lb_tag,src_lb_tag)]['valid']
valid_src = list()
valid_obj = list()
src_lb_keys = list(valid_lb_src2obj.keys())  # list() so the keys can be indexed under Python 3
obj_lb_keys = list(valid_lb_obj2src.keys())
for i in range(len(valid_lb_src2obj)):
cand_src = list()
cand_obj = list()
src_lb = src_lb_keys[i]
if not src_lb in lookup_src:
continue
nd_src = lookup_src[src_lb] # idx of embedding in src network
obj_lbs = valid_lb_src2obj[src_lb]
for obj_lb in obj_lbs:
if not obj_lb in lookup_obj:
continue
nd_obj = lookup_obj[obj_lb]
cand_src.append(nd_src)
cand_obj.append(nd_obj)
for k in range(valid_sample_size-1):
rand_nd_obj = None
while not rand_nd_obj or rand_nd_obj in cand_obj or rand_obj_lb in obj_lbs:
rand_obj_lb_idx = random.randint(0, len(obj_lb_keys)-1)
rand_obj_lb = obj_lb_keys[rand_obj_lb_idx]
if not rand_obj_lb in lookup_obj:
continue
rand_nd_obj = lookup_obj[rand_obj_lb]
cand_src.append(nd_src)
cand_obj.append(rand_nd_obj)
if (cand_src and cand_obj) and len(cand_src)==valid_sample_size and len(cand_obj)==valid_sample_size:
valid_src.append(cand_src)
valid_obj.append(cand_obj)
cand_src = list()
cand_obj = list()
return valid_src, valid_obj
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=bool)  # np.bool is deprecated in recent NumPy
def read_embeddings(embed_file, lookup, look_back):
embedding = list()
with open(embed_file, 'r') as emb_handler:
idx = 0
for ln in emb_handler:
ln = ln.strip()
if ln:
elems = ln.split()
if len(elems)==2:
continue
arr = np.array(list(map(float, elems[1:])))  # list() needed in Python 3, where map returns an iterator
embedding.append(arr/np.linalg.norm(arr))
lookup[elems[0]] = idx
look_back.append(elems[0])
idx += 1
return
|
np.array(embedding)
|
numpy.array
|
import os,sys, time,pickle, copy
import numpy as np, tensorflow as tf
import os.path as osp
from baselines import logger
from collections import deque
import baselines.common.tf_util as U
from baselines.common import explained_variance, set_global_seeds, colorize
from baselines.common.policies import build_policy
from contextlib import contextmanager
try:
from mpi4py import MPI
except ImportError:
MPI = None
# MBL
from mbl.mbl import MBL, MBLCEM, MBLMPPI
from mbl.exp_util import eval_policy, Policy
from mbl.util.util import load_extracted_val_data as load_val_data
from mbl.util.util import to_onehot
from mbl.model_config import get_make_mlp_model
#from plot import plot
#from visdom import Visdom
from multiprocessing.dummy import Pool
import multiprocessing as mp
from runner import Runner
from model_novec import Model
def constfn(val):
def f(_):
return val
return f
def learn(*, network,
env, eval_env, make_eval_env, env_id,
total_timesteps, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
sil_update=10, sil_value=0.01, sil_alpha=0.6, sil_beta=0.1, sil_loss=0.1,
# MBL
# For train mbl
mbl_train_freq=5,
# For eval
num_eval_episodes=5,
eval_freq=5,
vis_eval=False,
eval_targs=('mbmf',),
#eval_targs=('mf',),
quant=2,
# For mbl.step
#num_samples=(1500,),
num_samples=(1,),
horizon=(2,),
#horizon=(2,1),
#num_elites=(10,),
num_elites=(1,),
mbl_lamb=(1.0,),
mbl_gamma=0.99,
#mbl_sh=1, # Number of step for stochastic sampling
mbl_sh=10000,
#vf_lookahead=-1,
#use_max_vf=False,
reset_per_step=(0,),
# For get_model
num_fc=2,
num_fwd_hidden=500,
use_layer_norm=False,
# For MBL
num_warm_start=int(1e4),
init_epochs=10,
update_epochs=5,
batch_size=512,
update_with_validation=False,
use_mean_elites=1,
use_ent_adjust=0,
adj_std_scale=0.5,
# For data loading
validation_set_path=None,
# For data collect
collect_val_data=False,
# For traj collect
traj_collect='mf',
# For profile
measure_time=True,
eval_val_err=False,
measure_rew=True,
save_interval=0, load_path=None, model_fn=None, update_fn=None, init_fn=None, mpi_rank_weight=1, comm=None, **network_kwargs):
'''
Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See common/models.py/lstm for more details on using recurrent nets in policies
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
log_interval: int number of timesteps between logging events
nminibatches: int number of training minibatches per update. For recurrent policies,
should be smaller or equal than number of environments run in parallel.
noptepochs: int number of training epochs per update
cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
save_interval: int number of timesteps between saving events
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
if not isinstance(num_samples, tuple): num_samples = (num_samples,)
if not isinstance(horizon, tuple): horizon = (horizon,)
if not isinstance(num_elites, tuple): num_elites = (num_elites,)
if not isinstance(mbl_lamb, tuple): mbl_lamb = (mbl_lamb,)
if not isinstance(reset_per_step, tuple): reset_per_step = (reset_per_step,)
if validation_set_path is None:
if collect_val_data: validation_set_path = os.path.join(logger.get_dir(), 'val.pkl')
else: validation_set_path = os.path.join('dataset', '{}-val.pkl'.format(env_id))
if eval_val_err:
eval_val_err_path = os.path.join('dataset', '{}-combine-val.pkl'.format(env_id))
logger.log(locals())
logger.log('MBL_SH', mbl_sh)
logger.log('Traj_collect', traj_collect)
if MPI is not None:
nworkers = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
else:
nworkers = 1
rank = 0
cpus_per_worker = 1
U.get_session(config=tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=cpus_per_worker,
intra_op_parallelism_threads=cpus_per_worker
))
set_global_seeds(seed)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
policy = build_policy(env, network, **network_kwargs)
np.set_printoptions(precision=3)
# Get the nb of env
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
is_mpi_root = (MPI is None or MPI.COMM_WORLD.Get_rank() == 0)
# Instantiate the model object (that creates act_model and train_model)
if model_fn is None:
model_fn = Model
make_model = lambda: Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
sil_update=sil_update,
fn_reward=None, fn_obs=None,
sil_value=sil_value,
sil_alpha=sil_alpha,
sil_beta=sil_beta,
sil_loss=sil_loss,
comm=comm, mpi_rank_weight=mpi_rank_weight,
ppo=True,prev_pi=None)
model=make_model()
pi=model.sil_model
if load_path is not None:
model.load(load_path)
# MBL
# ---------------------------------------
#viz = Visdom(env=env_id)
win = None
eval_targs = list(eval_targs)
logger.log(eval_targs)
make_model_f = get_make_mlp_model(num_fc=num_fc, num_fwd_hidden=num_fwd_hidden, layer_norm=use_layer_norm)
mbl = MBL(env=eval_env, env_id=env_id, make_model=make_model_f,
num_warm_start=num_warm_start,
init_epochs=init_epochs,
update_epochs=update_epochs,
batch_size=batch_size,
**network_kwargs)
val_dataset = {'ob': None, 'ac': None, 'ob_next': None}
if update_with_validation:
logger.log('Update with validation')
val_dataset = load_val_data(validation_set_path)
if eval_val_err:
logger.log('Log val error')
eval_val_dataset = load_val_data(eval_val_err_path)
if collect_val_data:
logger.log('Collect validation data')
val_dataset_collect = []
def _mf_pi(ob, t=None):
stochastic = True
ac, vpred, _, _ = pi.step(ob, stochastic=stochastic)
return ac, vpred
def _mf_det_pi(ob, t=None):
#ac, vpred, _, _ = pi.step(ob, stochastic=False)
ac, vpred = pi._evaluate([pi.pd.mode(), pi.vf], ob)
return ac, vpred
def _mf_ent_pi(ob, t=None):
mean, std, vpred = pi._evaluate([pi.pd.mode(), pi.pd.std, pi.vf], ob)
ac = np.random.normal(mean, std * adj_std_scale, size=mean.shape)
return ac, vpred
# use_ent_adjust => sample pi actions from a Gaussian whose std is scaled by adj_std_scale
def _mbmf_inner_pi(ob, t=0):
if use_ent_adjust:
return _mf_ent_pi(ob)
else:
#return _mf_pi(ob)
if t < mbl_sh: return _mf_pi(ob)
else: return _mf_det_pi(ob)
# ---------------------------------------
# Run multiple configuration once
all_eval_descs = []
def make_mbmf_pi(n, h, e, l):
def _mbmf_pi(ob):
ac, rew = mbl.step(ob=ob, pi=_mbmf_inner_pi, horizon=h, num_samples=n, num_elites=e, gamma=mbl_gamma, lamb=l, use_mean_elites=use_mean_elites)
return ac[None], rew
return Policy(step=_mbmf_pi, reset=None)
for n in num_samples:
for h in horizon:
for l in mbl_lamb:
for e in num_elites:
if 'mbmf' in eval_targs: all_eval_descs.append(('MeanRew', 'MBL_PPO_SIL', make_mbmf_pi(n, h, e, l)))
#if 'mbmf' in eval_targs: all_eval_descs.append(('MeanRew-n-{}-h-{}-e-{}-l-{}-sh-{}-me-{}'.format(n, h, e, l, mbl_sh, use_mean_elites), 'MBL_TRPO-n-{}-h-{}-e-{}-l-{}-sh-{}-me-{}'.format(n, h, e, l, mbl_sh, use_mean_elites), make_mbmf_pi(n, h, e, l)))
if 'mf' in eval_targs: all_eval_descs.append(('MeanRew', 'PPO_SIL', Policy(step=_mf_pi, reset=None)))
logger.log('List of evaluation targets')
for it in all_eval_descs:
logger.log(it[0])
@contextmanager
def timed(msg):
if rank == 0:
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize("done in %.3f seconds"%(time.time() - tstart), color='magenta'))
else:
yield
pool = Pool(mp.cpu_count())
warm_start_done = False
U.initialize()
if load_path is not None:
pi.load(load_path)
# Instantiate the runner object
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
epinfobuf = deque(maxlen=40)
if init_fn is not None: init_fn()
if traj_collect == 'mf':
obs= runner.run()[0]
# Start total timer
tfirststart = time.perf_counter()
nupdates = total_timesteps//nbatch
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
# Start timer
if hasattr(model.train_model, "ret_rms"):
model.train_model.ret_rms.update(returns)
if hasattr(model.train_model, "rms"):
model.train_model.rms.update(obs)
tstart = time.perf_counter()
frac = 1.0 - (update - 1.0) / nupdates
# Calculate the learning rate
lrnow = lr(frac)
# Calculate the cliprange
cliprangenow = cliprange(frac)
if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...')
# Get minibatch
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
# Val data collection
if collect_val_data:
for ob_, ac_, ob_next_ in zip(obs[:-1, 0, ...], actions[:-1, ...], obs[1:, 0, ...]):
val_dataset_collect.append((copy.copy(ob_), copy.copy(ac_), copy.copy(ob_next_)))
# -----------------------------
# MBL update
else:
ob_mbl, ac_mbl = obs.copy(), actions.copy()
mbl.add_data_batch(ob_mbl[:-1, ...], ac_mbl[:-1, ...], ob_mbl[1:, ...])
mbl.update_forward_dynamic(require_update=(update-1) % mbl_train_freq == 0,
ob_val=val_dataset['ob'], ac_val=val_dataset['ac'], ob_next_val=val_dataset['ob_next'])
# -----------------------------
if update % log_interval == 0 and is_mpi_root: logger.info('Done.')
epinfobuf.extend(epinfos)
# Here what we're going to do is for each minibatch calculate the loss and append it.
mblossvals = []
if states is None: # nonrecurrent version
# Index of each element of batch_size
# Create the indices array
inds = np.arange(nbatch)
for _ in range(noptepochs):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
l_loss, sil_adv, sil_samples, sil_nlogp = model.sil_train(lrnow)
else: # recurrent version
print("caole")
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
# Feedforward --> get losses --> update
lossvals =
|
np.mean(mblossvals, axis=0)
|
numpy.mean
|
from typing import NamedTuple, Tuple, List, Dict
import numpy as np
import matplotlib.pyplot as plt
from gym.envs.toy_text.discrete import DiscreteEnv
from matplotlib.animation import FuncAnimation
PL = 0
MNT = 1
WTCH = 2
MNST = 3
LAVA = 4
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
ACTIONS = [UP, RIGHT, DOWN, LEFT]
COLORS = np.array([
[1, 1, 1],
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0]
]).astype(float)
DELTA = {
UP: np.array([-1, 0]),
RIGHT: np.array([0, 1]),
DOWN:
|
np.array([1, 0])
|
numpy.array
|
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: <NAME>
# @Date: 2018-09-22 09:07:08
# @Last modified by: <NAME> (<EMAIL>)
# @Last Modified time: 2018-10-10 14:43:05
from __future__ import absolute_import, division, print_function
import math
import re
from decimal import Decimal
from operator import eq, ge, gt, le, lt, ne
import numpy as np
from astropy.io import fits
from sdssdb.sqlalchemy.mangadb import MangaBase, database, sampledb
from sqlalchemy import and_, func, select
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.engine import reflection
from sqlalchemy.ext.declarative import AbstractConcreteBase, declared_attr
from sqlalchemy.ext.hybrid import hybrid_method, hybrid_property
from sqlalchemy.orm import deferred, relationship
from sqlalchemy.schema import Column
from sqlalchemy.sql import column
from sqlalchemy.types import Float, Integer, String
SCHEMA = 'mangadatadb'
class Base(AbstractConcreteBase, MangaBase):
__abstract__ = True
_schema = SCHEMA
_relations = 'define_relations'
@declared_attr
def __table_args__(cls):
return {'schema': cls._schema}
class ArrayOps(object):
''' this class adds array functionality '''
__table__ = None
__tablename__ = 'arrayops'
@property
def cols(self):
return list(self.__table__.columns._data.keys())
@property
def collist(self):
return ['wavelength', 'flux', 'ivar', 'mask', 'xpos', 'ypos', 'specres']
def getTableName(self):
return self.__table__.name
def matchIndex(self, name=None):
# Get index of correct column
incols = [x for x in self.cols if x in self.collist]
if not any(incols):
return None
elif len(incols) == 1:
idx = self.cols.index(incols[0])
else:
if not name:
print('Multiple columns found. Column name must be specified!')
return None
elif name in self.collist:
idx = self.cols.index(name)
else:
return None
return idx
def filter(self, start, end, name=None):
# Check input types or map string operators
startnum = type(start) == int or type(start) == float
endnum = type(end) == int or type(end) == float
opdict = {'=': eq, '<': lt, '<=': le, '>': gt, '>=': ge, '!=': ne}
if start in opdict.keys() or end in opdict.keys():
opind = list(opdict.keys()).index(start) if start in opdict.keys() else list(opdict.keys()).index(end)
if start in opdict.keys():
start = opdict[list(opdict.keys())[opind]]
if end in opdict.keys():
end = opdict[list(opdict.keys())[opind]]
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
if startnum and endnum:
arr = [x for x in data if x >= start and x <= end]
elif not startnum and endnum:
arr = [x for x in data if start(x, end)]
elif startnum and not endnum:
arr = [x for x in data if end(x, start)]
elif startnum == eq or endnum == eq:
arr = [x for x in data if start(x, end)] if start == eq else [x for x in data if end(x, start)]
return arr
else:
return None
def equal(self, num, name=None):
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
arr = [x for x in data if x == num]
return arr
else:
return None
def less(self, num, name=None):
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
arr = [x for x in data if x <= num]
return arr
else:
return None
def greater(self, num, name=None):
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
arr = [x for x in data if x >= num]
return arr
else:
return None
def getIndices(self, arr):
if self.idx:
indices = [self.__getattribute__(self.cols[self.idx]).index(a) for a in arr]
else:
return None
return indices
class Cube(ArrayOps, Base):
__tablename__ = 'cube'
print_fields = ['plateifu', 'pipelineInfo.version.version']
specres = deferred(Column(ARRAY(Float, zero_indexes=True)))
specresd = deferred(Column(ARRAY(Float, zero_indexes=True)))
prespecres = deferred(Column(ARRAY(Float, zero_indexes=True)))
prespecresd = deferred(Column(ARRAY(Float, zero_indexes=True)))
target = relationship(sampledb.MangaTarget, backref='cubes')
carts = relationship('Cart', secondary='{0}.cart_to_cube'.format(SCHEMA), backref="cubes")
@property
def header(self):
'''Returns an astropy header'''
session = database.Session.object_session(self)
data = session.query(FitsHeaderKeyword.label, FitsHeaderValue.value,
FitsHeaderValue.comment).join(FitsHeaderValue).filter(
FitsHeaderValue.cube == self).all()
hdr = fits.Header(data)
return hdr
def header_to_dict(self):
'''Returns a simple python dictionary header'''
values = self.headervals
hdrdict = {str(val.keyword.label): val.value for val in values}
return hdrdict
@property
def plateclass(self):
'''Returns a plate class'''
plate = Plate(self)
return plate
def testhead(self, key):
''' Test existence of header keyword'''
try:
if self.header_to_dict()[key]:
return True
except Exception:
return False
def getFlags(self, bits, name):
from sdssdb.sqlalchemy.mangadb.auxdb import MaskBit
session = database.Session.object_session(self)
# if bits not a digit, return None
if not str(bits).isdigit():
return 'NULL'
else:
bits = int(bits)
# Convert the integer value to list of bits
bitlist = [int(i) for i in '{0:08b}'.format(bits)]
bitlist.reverse()
indices = [i for i, bit in enumerate(bitlist) if bit]
labels = []
for i in indices:
maskbit = session.query(MaskBit).filter_by(flag=name, bit=i).one()
labels.append(maskbit.label)
return labels
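# Illustrative note (added): for example, bits=5 formats to '00000101', which after
# reversing gives the bit flags [1, 0, 1, 0, 0, 0, 0, 0], i.e. set-bit indices [0, 2];
# each index is then resolved to its MaskBit label via the query above.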
def getQualBits(self, stage='3d'):
''' get quality flags '''
col = 'DRP2QUAL' if stage == '2d' else 'DRP3QUAL'
hdr = self.header_to_dict()
bits = hdr.get(col, None)
return bits
def getQualFlags(self, stage='3d'):
''' get quality flags '''
name = 'MANGA_DRP2QUAL' if stage == '2d' else 'MANGA_DRP3QUAL'
bits = self.getQualBits(stage=stage)
if bits:
return self.getFlags(bits, name)
else:
return None
def getTargFlags(self, targtype=1):
''' get target flags '''
name = 'MANGA_TARGET1' if targtype == 1 else 'MANGA_TARGET2' if targtype == 2 else 'MANGA_TARGET3'
bits = self.getTargBits(targtype=targtype)
if bits:
return self.getFlags(bits, name)
else:
return None
def getTargBits(self, targtype=1):
''' get target bits '''
assert targtype in [1, 2, 3], 'target type can only be 1, 2 or 3'
hdr = self.header_to_dict()
newcol = 'MNGTARG{0}'.format(targtype)
oldcol = 'MNGTRG{0}'.format(targtype)
bits = hdr.get(newcol, hdr.get(oldcol, None))
return bits
def get3DCube(self, extension='flux'):
"""Returns a 3D array of ``extension`` from the cube spaxels.
For example, ``cube.get3DCube('flux')`` will return the original
flux cube with the same ordering as the FITS data cube.
Note that this method seems to be really slow retrieving arrays (this
is especially serious for large IFUs).
"""
session = database.Session.object_session(self)
spaxels = session.query(getattr(Spaxel, extension)).filter(
Spaxel.cube_pk == self.pk).order_by(Spaxel.x, Spaxel.y).all()
# Assumes cubes are always square (!)
nx = ny = int(np.sqrt(len(spaxels)))
nwave = len(spaxels[0][0])
spArray =
|
np.array(spaxels)
|
numpy.array
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The matplotlib plotter implementation for all the primitive tasks (in our case: lines and
dots)
"""
from typing import Any, Callable, Dict, List
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import numpy as np
from .core import BasePlotter, BasePlotterTask
class Matplotlib2DPlotter(BasePlotter):
_fig: plt.figure # plt figure
_ax: plt.axis # plt axis
# stores artist objects for each task (task name as the key)
_artist_cache: Dict[str, Any]
# callables for each task primitives
_create_impl_callables: Dict[str, Callable]
_update_impl_callables: Dict[str, Callable]
def __init__(self, task: "BasePlotterTask") -> None:
fig, ax = plt.subplots()
self._fig = fig
self._ax = ax
self._artist_cache = {}
self._create_impl_callables = {
"Draw2DLines": self._lines_create_impl,
"Draw2DDots": self._dots_create_impl,
"Draw2DTrail": self._trail_create_impl,
}
self._update_impl_callables = {
"Draw2DLines": self._lines_update_impl,
"Draw2DDots": self._dots_update_impl,
"Draw2DTrail": self._trail_update_impl,
}
self._init_lim()
super().__init__(task)
@property
def ax(self):
return self._ax
@property
def fig(self):
return self._fig
def show(self):
plt.show()
def _min(self, x, y):
if x is None:
return y
if y is None:
return x
return min(x, y)
def _max(self, x, y):
if x is None:
return y
if y is None:
return x
return max(x, y)
def _init_lim(self):
self._curr_x_min = None
self._curr_y_min = None
self._curr_x_max = None
self._curr_y_max = None
def _update_lim(self, xs, ys):
self._curr_x_min = self._min(np.min(xs), self._curr_x_min)
self._curr_y_min = self._min(np.min(ys), self._curr_y_min)
self._curr_x_max = self._max(np.max(xs), self._curr_x_max)
self._curr_y_max = self._max(np.max(ys), self._curr_y_max)
def _set_lim(self):
if not (
self._curr_x_min is None
or self._curr_x_max is None
or self._curr_y_min is None
or self._curr_y_max is None
):
self._ax.set_xlim(self._curr_x_min, self._curr_x_max)
self._ax.set_ylim(self._curr_y_min, self._curr_y_max)
self._init_lim()
@staticmethod
def _lines_extract_xy_impl(index, lines_task):
return lines_task[index, :, 0], lines_task[index, :, 1]
@staticmethod
def _trail_extract_xy_impl(index, trail_task):
return (trail_task[index : index + 2, 0], trail_task[index : index + 2, 1])
def _lines_create_impl(self, lines_task):
color = lines_task.color
self._artist_cache[lines_task.task_name] = [
self._ax.plot(
*Matplotlib2DPlotter._lines_extract_xy_impl(i, lines_task),
color=color,
linewidth=lines_task.line_width,
alpha=lines_task.alpha
)[0]
for i in range(len(lines_task))
]
def _lines_update_impl(self, lines_task):
lines_artists = self._artist_cache[lines_task.task_name]
for i in range(len(lines_task)):
artist = lines_artists[i]
xs, ys = Matplotlib2DPlotter._lines_extract_xy_impl(i, lines_task)
artist.set_data(xs, ys)
if lines_task.influence_lim:
self._update_lim(xs, ys)
def _dots_create_impl(self, dots_task):
color = dots_task.color
self._artist_cache[dots_task.task_name] = self._ax.plot(
dots_task[:, 0],
dots_task[:, 1],
c=color,
linestyle="",
marker=".",
markersize=dots_task.marker_size,
alpha=dots_task.alpha,
)[0]
def _dots_update_impl(self, dots_task):
dots_artist = self._artist_cache[dots_task.task_name]
dots_artist.set_data(dots_task[:, 0], dots_task[:, 1])
if dots_task.influence_lim:
self._update_lim(dots_task[:, 0], dots_task[:, 1])
def _trail_create_impl(self, trail_task):
color = trail_task.color
trail_length = len(trail_task) - 1
self._artist_cache[trail_task.task_name] = [
self._ax.plot(
*Matplotlib2DPlotter._trail_extract_xy_impl(i, trail_task),
color=trail_task.color,
linewidth=trail_task.line_width,
alpha=trail_task.alpha * (1.0 - i / (trail_length - 1))
)[0]
for i in range(trail_length)
]
def _trail_update_impl(self, trail_task):
trails_artists = self._artist_cache[trail_task.task_name]
for i in range(len(trail_task) - 1):
artist = trails_artists[i]
xs, ys = Matplotlib2DPlotter._trail_extract_xy_impl(i, trail_task)
artist.set_data(xs, ys)
if trail_task.influence_lim:
self._update_lim(xs, ys)
def _create_impl(self, task_list):
for task in task_list:
self._create_impl_callables[task.task_type](task)
self._draw()
def _update_impl(self, task_list):
for task in task_list:
self._update_impl_callables[task.task_type](task)
self._draw()
def _set_aspect_equal_2d(self, zero_centered=True):
xlim = self._ax.get_xlim()
ylim = self._ax.get_ylim()
if not zero_centered:
xmean = np.mean(xlim)
ymean = np.mean(ylim)
else:
xmean = 0
ymean = 0
plot_radius = max(
[
abs(lim - mean_)
for lims, mean_ in ((xlim, xmean), (ylim, ymean))
for lim in lims
]
)
self._ax.set_xlim([xmean - plot_radius, xmean + plot_radius])
self._ax.set_ylim([ymean - plot_radius, ymean + plot_radius])
def _draw(self):
self._set_lim()
self._set_aspect_equal_2d()
self._fig.canvas.draw()
self._fig.canvas.flush_events()
plt.pause(0.00001)
class Matplotlib3DPlotter(BasePlotter):
_fig: plt.figure # plt figure
_ax: p3.Axes3D # plt 3d axis
# stores artist objects for each task (task name as the key)
_artist_cache: Dict[str, Any]
# callables for each task primitives
_create_impl_callables: Dict[str, Callable]
_update_impl_callables: Dict[str, Callable]
def __init__(self, task: "BasePlotterTask") -> None:
self._fig = plt.figure()
self._ax = p3.Axes3D(self._fig)
self._artist_cache = {}
self._create_impl_callables = {
"Draw3DLines": self._lines_create_impl,
"Draw3DDots": self._dots_create_impl,
"Draw3DTrail": self._trail_create_impl,
}
self._update_impl_callables = {
"Draw3DLines": self._lines_update_impl,
"Draw3DDots": self._dots_update_impl,
"Draw3DTrail": self._trail_update_impl,
}
self._init_lim()
super().__init__(task)
@property
def ax(self):
return self._ax
@property
def fig(self):
return self._fig
def show(self):
plt.show()
def _min(self, x, y):
if x is None:
return y
if y is None:
return x
return min(x, y)
def _max(self, x, y):
if x is None:
return y
if y is None:
return x
return max(x, y)
def _init_lim(self):
self._curr_x_min = None
self._curr_y_min = None
self._curr_z_min = None
self._curr_x_max = None
self._curr_y_max = None
self._curr_z_max = None
def _update_lim(self, xs, ys, zs):
self._curr_x_min = self._min(np.min(xs), self._curr_x_min)
self._curr_y_min = self._min(np.min(ys), self._curr_y_min)
self._curr_z_min = self._min(np.min(zs), self._curr_z_min)
self._curr_x_max = self._max(np.max(xs), self._curr_x_max)
self._curr_y_max = self._max(
|
np.max(ys)
|
numpy.max
|
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from sktime.forecasters import DummyForecaster, EnsembleForecaster
from sktime.forecasters import ExpSmoothingForecaster
from sktime.forecasters import ARIMAForecaster
from sktime.datasets import load_shampoo_sales
__author__ = "<NAME>"
# forecasters
FORECASTERS = (DummyForecaster, ExpSmoothingForecaster, ARIMAForecaster)
# forecast horizons
FHS = ([1], [1, 3], np.array([1]), np.array([1, 3]), np.arange(5))
# load test data
y = load_shampoo_sales()
# test default forecasters output for different forecasters horizons
@pytest.mark.parametrize("fh", FHS)
def test_EnsembleForecaster_fhs(fh):
estimators = [
('ses', ExpSmoothingForecaster()),
('last', DummyForecaster(strategy='last'))
]
m = EnsembleForecaster(estimators=estimators)
m.fit(y, fh=fh)
y_pred = m.predict(fh=fh)
# adjust for default value
if fh is None:
fh = np.array([1])
if isinstance(fh, list):
fh =
|
np.asarray(fh)
|
numpy.asarray
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AUTHOR
- <NAME> <<EMAIL>>
- <NAME> <<EMAIL>>
DESCRIPTION
usage: cwt_analysis_synthesis.py [-h] [-v] [-M MODE] [-m MEAN_F0] [-o OUTPUT]
[-P]
input_file
Tool for CWT analysis/synthesis of the F0
positional arguments:
input_file Input signal or F0 file
optional arguments:
-h, --help show this help message and exit
-v, --verbosity increase output verbosity
-M MODE, --mode MODE script mode: 0=analysis, 1=synthesis, 2=analysis/synthesis
-m MEAN_F0, --mean_f0 MEAN_F0
Mean f0 needed for synthesis (unused for analysis modes)
-o OUTPUT, --output OUTPUT
output directory for analysis or filename for synthesis.
(Default: input_file directory [Analysis] or <input_file>.f0 [Synthesis])
-P, --plot Plot the results
LICENSE
See https://github.com/asuni/wavelet_prosody_toolkit/blob/master/LICENSE.txt
"""
import sys
import os
import traceback
import argparse
import time
import logging
import yaml
# Collections
from collections import defaultdict
import warnings
# Plotting
import matplotlib.pyplot as plt
import matplotlib.colors as colors
# Wavelet import
from wavelet_prosody_toolkit.prosody_tools import misc
from wavelet_prosody_toolkit.prosody_tools import cwt_utils
from wavelet_prosody_toolkit.prosody_tools import f0_processing
import numpy as np
# List of logging levels used to setup everything using verbose option
LEVEL = [logging.WARNING, logging.INFO, logging.DEBUG]
# FIXME: be more specific!
warnings.simplefilter("ignore", np.ComplexWarning) # Plotting can't deal with complex, but we don't care
###############################################################################
# Functions
###############################################################################
def apply_configuration(current_configuration, updating_part):
"""Utils to update the current configuration using the updating part
Parameters
----------
current_configuration: dict
The current state of the configuration
updating_part: dict
The information to add to the current configuration
Returns
-------
dict
the updated configuration
"""
if not isinstance(current_configuration, dict):
return updating_part
if current_configuration is None:
return updating_part
if updating_part is None:
return current_configuration
for k in updating_part:
if k not in current_configuration:
current_configuration[k] = updating_part[k]
else:
current_configuration[k] = apply_configuration(current_configuration[k], updating_part[k])
return current_configuration
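# Illustrative worked example (added): with the merge rules above,
#   apply_configuration({"f0": {"min_f0": 50, "max_f0": 400}}, {"f0": {"max_f0": 300}})
# returns {"f0": {"min_f0": 50, "max_f0": 300}} -- keys present in the updating part
# override the current values, while keys missing from it are left untouched.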
def load_f0(input_file, binary_mode=False, configuration=None):
"""Load the f0 from a text file or extract it from a wav file
Parameters
----------
input_file: string
The input file name.
Returns
-------
1D arraylike
the raw f0 values
"""
if input_file.lower().endswith(".csv"):
if binary_mode:
raise Exception("cannot have a csv file in binary mode")
else:
raw_f0 = np.loadtxt(input_file)
if input_file.lower().endswith(".f0"):
if binary_mode:
raw_f0 = np.fromfile(input_file, dtype=np.float32)
else:
raw_f0 = np.loadtxt(input_file)
elif input_file.lower().endswith(".lf0"):
if binary_mode:
raw_f0 = np.fromfile(input_file, dtype=np.float32)
else:
raw_f0 = np.loadtxt(input_file)
raw_f0 = np.exp(raw_f0)
elif input_file.lower().endswith(".wav"):
logging.info("Extracting the F0 from the signal")
(fs, wav_form) = misc.read_wav(input_file)
raw_f0 = f0_processing.extract_f0(wav_form, fs,
configuration["f0"]["min_f0"],
configuration["f0"]["max_f0"])
return raw_f0
###############################################################################
# Main function
###############################################################################
def run():
"""Main entry function
This function contains the code needed to achieve the analysis and/or the synthesis
"""
global args
warnings.simplefilter("ignore", FutureWarning) # Plotting can't deal with complex, but we don't care
# Loading default configuration
configuration = defaultdict()
with open(os.path.dirname(os.path.realpath(__file__)) + "/configs/default.yaml", 'r') as f:
configuration = apply_configuration(configuration, defaultdict(lambda: False, yaml.load(f)))
logging.debug("default configuration")
logging.debug(configuration)
# Loading dedicated analysis.synthesis configuration
with open(os.path.dirname(os.path.realpath(__file__)) + "/configs/synthesis.yaml", 'r') as f:
configuration = apply_configuration(configuration, defaultdict(lambda: False, yaml.load(f)))
logging.debug("configuration filled with synthesis part")
logging.debug(configuration)
# Loading user configuration
if args.configuration_file:
try:
with open(args.configuration_file, 'r') as f:
configuration = apply_configuration(configuration, defaultdict(lambda: False, yaml.load(f)))
logging.debug("configuration filled with user part")
logging.debug(configuration)
except IOError as ex:
logging.error("configuration file " + args.config + " could not be loaded:")
logging.error(ex.msg)
sys.exit(1)
# Analysis Mode
if args.mode == 0:
raw_f0 = load_f0(args.input_file, args.binary_mode, configuration)
logging.info("Processing f0")
f0 = f0_processing.process(raw_f0)
# FIXME: reintegrated
if args.plot:
plt.title("F0 preprocessing and interpolation")
plt.plot(f0, color="red", alpha=0.5, linewidth=3)
plt.plot(raw_f0, color="gray", alpha=0.5)
plt.show()
# # FIXME: read this?
# logging.info("writing interpolated lf0\t" + output_file + ".interp")
# np.savetxt(output_file + ".interp", f0.astype('float'),
# fmt="%f", delimiter="\n")
# Perform a continuous wavelet transform of the mean-subtracted f0 with 12 scales, one octave apart
logging.info("Starting analysis with (num_scale=%d, scale_distance=%f, mother_name=%s)" %
(configuration["wavelet"]["num_scales"], configuration["wavelet"]["scale_distance"], configuration["wavelet"]["mother_wavelet"]))
full_scales, widths, _ = cwt_utils.cwt_analysis(f0 - np.mean(f0),
mother_name=configuration["wavelet"]["mother_wavelet"],
period=configuration["wavelet"]["period"],
num_scales=configuration["wavelet"]["num_scales"],
scale_distance=configuration["wavelet"]["scale_distance"],
apply_coi=False)
full_scales = np.real(full_scales)
# SSW parameterization, adjacent scales combined (with extra scales to handle long utterances)
scales = cwt_utils.combine_scales(np.real(full_scales), configuration["wavelet"]["combined_scales"])
for i in range(0, len(scales)):
logging.debug("Mean scale[%d]: %s" % (i, str(
|
np.mean(scales[i])
|
numpy.mean
|
"""
Common covariance operator implementations.
"""
import math
import numpy as np
from numpy import random
from scipy import sparse, linalg
from . import CovarianceOperator
__all__ = [
'to_matrix', 'DenseCovariance', 'DiagonalCovariance',
]
def to_matrix(C, force_dense=False, out=None):
"""
Returns matrix representation of a covariance operator.
If the passed instance is already a covariance matrix, it is returned as-is. Compatible matrix types are
* ``numpy.ndarray``, ``numpy.matrix`` - returned as-is
* subclass of ``scipy.sparse.spmatrix`` - returned as-is if ``force_dense=False``, otherwise converted to
``numpy.ndarray``.
If ``C`` is an instance of :class:`endas.CovarianceOperator`, the result of ``C.to_matrix()`` is returned.
Please note that not all covariance operators support conversion to explicit matrix form.
Args:
C : :class:`endas.CovarianceOperator` instance or NumPy array.
force_dense : Value passed to :meth:`endas.CovarianceOperator.to_matrix()`.
out : If specified, the array is used as the output buffer instead of allocating new array.
The provided array must have the correct shape.
Returns:
Dense NumPy array (``ndarray``) or sparse SciPy array depending on ``C`` and ``force_dense``.
Raises:
TypeError: if C is of unsupported type
ValueError: if C is a non-square matrix
NotImplementedError: raised by :meth:`endas.CovarianceOperator.to_matrix()`
MemoryError: if the covariance matrix array cannot be allocated
"""
if isinstance(C, np.ndarray): ret = C
elif isinstance(C, np.matrix): ret = C
elif isinstance(C, sparse.spmatrix): ret = C if not force_dense else C.toarray(out=out)
elif isinstance(C, CovarianceOperator): ret = C.to_matrix(force_dense, out=out)
else:
raise TypeError("C is not an instance of matrix or CovarianceOperator")
if ret.shape[0] != ret.shape[1]:
raise ValueError("C must be a square matrix")
return ret
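# Illustrative usage sketch for to_matrix (not part of the library, values assumed):
# a plain ndarray passes through unchanged, while CovarianceOperator instances are
# converted via their own to_matrix() implementation.
#   C = np.eye(3)
#   to_matrix(C) is C                                                   # True
#   to_matrix(DiagonalCovariance(diag=np.array([1.0, 2.0, 4.0])),
#             force_dense=True).shape                                   # (3, 3)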
class DiagonalCovariance(CovarianceOperator):
"""
Implements diagonal covariance matrix.
The covariance operator is internally represented by a sparse matrix. Currently only the main diagonal
is supported but other diagonals could be added, if needed. The operator supports all methods of
:class:`CovarianceOperator`. The covariance can be instantiated with either the array of diagonal
elements or the reciprocal (inverse) array. This can prevent numerical issues in situations where the inverse
coefficients are near zero (thus leading to very large coefficients on the original diagonal) and if only
the inverse coefficients are needed (such as when only ``solve()`` is called).
Args:
diag (nx1 array) : Vector of diagonal elements of the covariance matrix. Defines covariance matrix of
shape (n, n)
invdiag (nx1 array) : Vector of inverse diagonal elements of the covariance matrix. Defines covariance
matrix of shape (n, n)
Only one of the ``diag`` and ``invdiag`` arrays can be given.
"""
def __init__(self, diag=None, invdiag=None):
if diag is not None and invdiag is not None:
raise ValueError("Only one of 'diag' and 'invdiag' arrays can be given.")
elif diag is None and invdiag is None:
raise ValueError("One of 'diag' or 'invdiag' arrays is needed.")
elif diag is not None:
self._diag_is_original = True
self._Q = sparse.diags(diag)
self._Qinv = sparse.diags(np.reciprocal(diag))
self._sddiag = np.sqrt(diag)
else:
self._diag_is_original = False
# recover the diagonal from its reciprocal, then keep the square roots
# of the diagonal elements for sampling
diag_from_inv = np.reciprocal(invdiag)
self._Q = sparse.diags(diag_from_inv)
self._Qinv = sparse.diags(invdiag)
self._sddiag = np.sqrt(diag_from_inv)
@property
def shape(self): return self._Q.shape
@property
def is_diagonal(self): return True
@property
def mc_only(self): return False
def diagonal(self):
"""
Returns the matrix diagonal as an array.
Returns:
Array of length ``self.shape[0]`` or ``self.shape[1]``.
"""
return self._Q.diagonal()
def inv_diagonal(self):
"""
Returns the inverse of the matrix diagonal as an array.
Returns:
Array of length ``self.shape[0]`` or ``self.shape[1]``.
"""
return self._Qinv.diagonal()
def random_multivariate_normal(self, N=1):
assert N >= 1
n = self.shape[0]
if N == 1:
return random.randn(n) * self._sddiag
else:
rv = random.randn(n * N).reshape(n, N)
return rv * self._sddiag.reshape(n, 1)
def solve(self, b, overwrite_b=False):
return self._Qinv.dot(b)
def to_matrix(self, force_dense=False, out=None):
return self._Q if not force_dense else np.asarray(self._Q.todense())
def add_to(self, x):
# Todo: custom add-to-diagonal ufunc?
np.fill_diagonal(x, x.diagonal() + self._Q.diagonal())
def localize(self, selected, taper):
sel_diag = self._Q.diagonal()[selected]
sel_invdiag = self._Qinv.diagonal()[selected]
if taper is not None:
assert len(selected) == len(taper)
sel_invdiag = sel_invdiag * taper
return DiagonalCovariance(invdiag=sel_invdiag)
elif self._diag_is_original:
return DiagonalCovariance(diag=sel_diag)
else:
return DiagonalCovariance(invdiag=sel_invdiag)
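# Illustrative sketch of DiagonalCovariance (values assumed, not from the source):
#   dc = DiagonalCovariance(diag=np.array([1.0, 4.0, 9.0]))
#   dc.diagonal()                              # array([1., 4., 9.])
#   dc.solve(np.ones(3))                       # array([1., 0.25, 0.111...]) -- element-wise 1/d_i
#   dc.random_multivariate_normal().shape      # (3,)
# Constructing from the inverse diagonal gives the same operator:
#   DiagonalCovariance(invdiag=np.array([1.0, 0.25, 1.0 / 9.0])).diagonal()   # ~[1., 4., 9.]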
class DenseCovariance(CovarianceOperator):
"""
Dense covariance matrix as CovarianceOperator.
This is a trivial wrapper around a dense covariance matrix stored in a NumPy array. Use only on small problems
when other representations don't fit.
Args:
C : Square NumPy matrix or array representing the covariance matrix.
"""
def __init__(self, C):
self._C = np.asarray(C)
@property
def shape(self): return self._C.shape
@property
def is_diagonal(self): return False
@property
def mc_only(self): return False
def random_multivariate_normal(self, N=1):
assert N >= 1
n = self.shape[0]
if N == 1:
return random.multivariate_normal(np.zeros(n), self._C)
else:
return random.multivariate_normal(
|
np.zeros(n)
|
numpy.zeros
|
import logging
import numpy as np
import pickle
from copy import deepcopy
from ase.atoms import Atoms
from thyme.utils.cell import convert_cell_format
from thyme.utils.savenload import save_file, load_file
from thyme.utils.atomic_symbols import species_to_order_label
from ._key import *
class Trajectory(object):
default_per_frame_keys = [
POSITION,
FORCE,
TOTAL_ENERGY,
CELL,
]
default_metadata_keys = [
"dipole_correction",
"species",
"nelm",
"cutoff",
"kpoints",
]
stat_keys = [
NATOMS,
SPECIES,
"nframes",
"name",
"formula",
PER_FRAME_ATTRS,
METADATA_ATTRS,
"fixed_attrs",
"filenames",
]
def __init__(self):
"""
dummy init. do nothing
"""
self._items = dict()
# unique name that can be used for printing
self.name = "default"
self.nframes = 0
self.natoms = 0
self.formula = ""
self.per_frame_attrs = []
self.metadata_attrs = []
self.fixed_attrs = []
self._iter_index = 0
def __len__(self):
return self.nframes
def __repr__(self):
s = f"{self.name}: {self.nframes} frames with {self.natoms} atoms, {self.formula}"
return s
def __str__(self):
"""
string method to list all details or shape of the trajectory
"""
s = repr(self)
for k in self.per_frame_attrs:
item = getattr(self, k)
s += f"\n {k} {item.shape}"
s += "metadata:\n"
for k in self.metadata_attrs:
item = getattr(self, k)
if isinstance(item, np.ndarray):
s += f"\n {k} shape {item.shape}"
elif isinstance(item, (list, tuple)):
s += f"\n {k} len {len(item)}"
else:
s += f"\n {k} value {item}"
return s
@property
def keys(self):
return (
self.per_frame_attrs
+ self.metadata_attrs
+ self.fixed_attrs
+ self.stat_keys
)
def __getitem__(self, key):
if key in self.per_frame_attrs or key in self.metadata_attrs:
return getattr(self, key, None)
if isinstance(key, int):
return self.get_frame(key)
def __iter__(self):
return self
def __next__(self):
self._iter_index = getattr(self, "_iter_index", 0)
if self._iter_index >= self.nframes:
raise StopIteration
self._iter_index += 1
return self.get_frame(self._iter_index - 1)
def get_frame(self, idx, keys=None):
if idx >= self.nframes:
raise ValueError(f"{idx} is larger than the total length {self.nframes}")
frame = {NATOMS: self.natoms, SPECIES: self.species}
if keys is None:
frame.update({key: getattr(self, key)[idx] for key in self.per_frame_attrs})
frame.update({key: getattr(self, key) for key in self.fixed_attrs})
else:
frame.update(
{
key: getattr(self, key)[idx]
for key in self.per_frame_attrs
if key in keys
}
)
frame.update(
{key: getattr(self, key) for key in self.fixed_attrs if key in keys}
)
return frame
def add_frames(self, dictionary):
find_key = [(key in dictionary) for key in self.per_frame_attrs]
if not all(find_key):
raise RuntimeError("key missing")
match_fields = [
(repr(dictionary[key]) == repr(getattr(self, key)))
for key in self.fixed_attrs
if key in dictionary
]
if not all(match_fields):
raise RuntimeError("fixed fields are not consistent missing")
for key in self.per_frame_attrs:
mat = np.vstack((getattr(self, key), dictionary[key]))
setattr(self, key, mat)
self.nframes += dictionary[POSITION].shape[0]
def get_attr(self, key):
if key in self.per_frame_attrs:
return getattr(self, key)
else:
return np.array([getattr(self, key)] * self.nframes)
def pop(self, key, fail=None):
item = self.get_attr(key)
for name_list in [self.per_frame_attrs, self.fixed_attrs, self.metadata_attrs]:
if key in name_list:
name_list.remove(key)
delattr(self, key)
return item
def add_frames(self, dictionary):
find_key = [(key in dictionary) for key in self.per_frame_attrs]
if not all(find_key):
raise RuntimeError("key missing")
match_fields = [
(repr(dictionary[key]) == repr(getattr(self, key)))
for key in self.fixed_attrs
if key in dictionary
]
if not all(match_fields):
raise RuntimeError("fixed fields are not consistent missing")
for key in self.per_frame_attrs:
mat = np.append(getattr(self, key), dictionary[key], axis=0)
setattr(self, key, mat)
self.nframes += dictionary[POSITION].shape[0]
def sanity_check(self):
for k in self.stat_keys:
if not hasattr(self, k):
setattr(self, k, 0)
if self.nframes < 0:
raise RuntimeError("nframes should be non-negative int")
if len(self.per_frame_attrs) != 0:
frames = {}
for k in self.per_frame_attrs:
frames[k] = len(getattr(self, k))
frames_values = set(list(frames.values()))
if len(frames_values) > 1:
logging.error(f"numbers of frames are inconsistent: {frames}")
raise RuntimeError(f"Data inconsistent")
if self.nframes != list(frames_values)[0]:
logging.error(
f"numbers of frames are inconsistent: {frames} and nframes = {self.nframes}"
)
raise RuntimeError(f"Data inconsistent")
if len(self.per_frame_attrs) > len(list(set(self.per_frame_attrs))):
raise ValueError(f"repeated keys in self.per_frame_attrs")
# always put POSITION as the first attribute
if POSITION not in self.per_frame_attrs:
raise ValueError(POSITION + " has to be defined")
idx = self.per_frame_attrs.index(POSITION)
if idx != 0:
temp = self.per_frame_attrs[0]
self.per_frame_attrs[0] = POSITION
self.per_frame_attrs[idx] = temp
if (
self.position.shape[1] != self.natoms
or len(self.species) != self.natoms
):
if self.position.shape[1] == len(self.species):
self.natoms = self.position.shape[1]
else:
raise ValueError("Natoms cannot be defined")
if len(self.metadata_attrs) > len(list(set(self.metadata_attrs))):
raise ValueError(f"repeated keys in self.metadata_attrs")
if len(self.fixed_attrs) > len(list(set(self.fixed_attrs))):
raise ValueError(f"repeated keys in self.fix_attrs")
if len(set(self.fixed_attrs).intersection(set(self.per_frame_attrs))) > 0:
raise ValueError(
f"fix attr: {self.fixed_attrs} and per frame attr: {self.per_frame_attrs} has overlap"
)
def add_field(self, list_name, name, item):
if list_name == PER_FRAME_ATTRS:
if not isinstance(item, np.ndarray):
raise TypeError(f"{name} value should be np.ndarray type")
if len(item) != self.nframes and len(self.per_frame_attrs) > 0:
logging.error(
f"Error: {repr(item)}'s length {len(item)} does not match {self.nframes}"
)
raise RuntimeError
elif len(self.per_frame_attrs) == 0:
self.nframes = item.shape[0]
the_list = getattr(self, list_name)
if name in the_list:
logging.debug(f"overwriting per_frame attr {name}")
else:
the_list += [name]
setattr(self, name, item)
logging.debug(f"add a pointer of {name} to the {list_name}")
def include_frames(self, accept_id=None):
if accept_id is None:
return
for k in self.per_frame_attrs:
new_mat = getattr(self, k)[accept_id]
setattr(self, k, new_mat)
self.nframes = len(accept_id)
def fix_to_per_frame(self, key):
if key not in self.fixed_attrs:
return
mat = self.expand_fixed_attr(key)
setattr(self, key, mat)
self.fixed_attrs.remove(key)
self.per_frame_attrs += [key]
def expand_fixed_attr(self, key):
if key not in self.fixed_attrs:
return
mat = getattr(self, key)
if isinstance(mat, np.ndarray):
mat = np.tile(
np.expand_dims(mat, axis=0), (self.nframes,) + (1,) * len(mat.shape)
)
else:
mat = [mat] * self.nframes
return mat
@classmethod
def from_file(cls, filename, update_dict={}, mapping={}):
obj = load_file(
supported_formats={"npz": "npz", "pickle": "pickle"}, filename=filename
)
if isinstance(obj, cls):
obj.sanity_check()
return obj
return cls.from_dict(dict(obj), update_dict=update_dict, mapping=mapping)
def to_dict(self):
return {k: getattr(self, k) for k in self.keys}
@classmethod
def from_dict(cls, input_dict, update_dict={}, mapping={}):
"""
requirement
positions: nframes x ?
optional:
cells
forces
species, or symbols
"""
trj = cls()
backward_remap = {
POSITION: POSITION + "s",
FORCE: FORCE + "S",
CELL: CELL + "s",
TOTAL_ENERGY: "energies",
}
input_dict = {k: v for k, v in input_dict.items()}
for new_name, original_name in mapping.items():
input_dict[new_name] = input_dict.pop(original_name)
for new_name, original_name in backward_remap.items():
if original_name in input_dict:
input_dict[new_name] = input_dict.pop(original_name)
input_dict.update(update_dict)
trj.nframes = input_dict[POSITION].shape[0]
if CELL in input_dict:
input_dict[CELL] = convert_cell_format(trj.nframes, input_dict[CELL])
for k in [POSITION, FORCE]:
if k in input_dict:
input_dict[k] = input_dict[k].reshape([trj.nframes, -1, 3])
trj.natoms = input_dict[POSITION].shape[1]
for k in cls.stat_keys:
if k in input_dict:
setattr(trj, k, input_dict[k])
for k in input_dict:
found = False
for attr in ["per_frame", "metadata", "fixed"]:
input_list = input_dict.get(f"{attr}_attrs", [])
if k in input_list and not found:
trj.add_field(f"{attr}_attrs", k, input_dict[k])
found = True
for attr in ["per_frame", "metadata", "fixed"]:
default_list = getattr(cls, f"default_{attr}_keys", [])
if k in default_list and not found:
trj.add_field(f"{attr}_attrs", k, input_dict[k])
found = True
trj.nframes = trj.position.shape[0]
trj.natoms = trj.position.shape[1]
remain_keys = set(list(input_dict.keys())) - set(trj.keys)
for k in remain_keys:
logging.debug(f"undefined attributes {k}, set to metadata")
try:
dim0 = input_dict[k].shape[0]
except:
dim0 = -1
if dim0 == trj.nframes:
trj.add_field(PER_FRAME_ATTRS, k, input_dict[k])
else:
trj.add_field(METADATA_ATTRS, k, input_dict[k])
trj.sanity_check()
return trj
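# Illustrative sketch of from_dict (key names assumed to follow the "positions"/
# "forces"/"cells" convention handled by backward_remap above): five frames of a
# two-atom system.
#   d = {"positions": np.zeros((5, 2, 3)),
#        "forces": np.zeros((5, 2, 3)),
#        "cells": np.tile(np.eye(3), (5, 1, 1)),
#        "species": ["H", "H"]}
#   trj = Trajectory.from_dict(d)   # trj.nframes == 5, trj.natoms == 2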
def copy_metadata(self, trj, exception):
for k in set(trj.metadata_attrs) - set(exception):
item = getattr(trj, k, None)
ori_item = getattr(self, k, None)
if ori_item is None and item is not None:
setattr(self, k, item)
if k not in self.metadata_attrs:
self.metadata_attrs += [k]
else:
equal = False
try:
if ori_item == item:
equal = True
except:
pass
try:
if (ori_item == item).all():
equal = True
except:
pass
try:
if np.equal(ori_item, item).all():
equal = True
except:
pass
if not equal and item is None:
ori_item = getattr(self, k, None)
logging.info(f"key {k} are not the same in the two objects")
logging.info(f" {item} {ori_item}")
def save(self, name: str, format: str = None):
save_file(
self.to_dict(),
supported_formats={"npz": "npz", "pickle": "pickle"},
filename=name,
enforced_format=format,
)
@classmethod
def stack(cls, trjs, safe_mode=True, order=None):
"""
add all frames from another trajectory instance
"""
nframes = np.sum([trj.nframes for trj in trjs])
if nframes <= 0:
return Trajectory()
if len(trjs) == 1:
trj = Trajectory()
trj.copy(trjs[0])
return trj
if order is not None:
for i, trj in enumerate(trjs):
trj.reorder_atoms(order[i])
natoms = set([trj.natoms for trj in trjs])
if len(natoms) != 1:
raise ValueError(f"cannot merge trjs with different numbers {natoms}")
if safe_mode:
labels = []
for trj in trjs:
_order, new_label = species_to_order_label(trj.species)
labels += [new_label]
labels = set(labels)
if len(labels) != 1:
raise ValueError(f"cannot merge trjs with different species {labels}")
trj0 = trjs[0]
d = trj0.to_dict()
for k in trj0.per_frame_attrs:
items = [getattr(trj, k) for trj in trjs]
try:
mat = np.concatenate(items, axis=0)
except Exception as e:
raise RuntimeError("fail", k, set([item.shape for item in items]), e)
d[k] = mat
d["nframes"] = nframes
d["natoms"] = list(natoms)[0]
trj = Trajectory.from_dict(d)
return trj
def add_trj(self, trj, safe_mode=True, order=None):
"""
add all frames from another trajectory instance
"""
if trj.nframes <= 0:
return
if order is not None:
trj.reorder_atoms(order)
if self.nframes == 0:
self.copy(trj)
else:
if self.natoms != trj.natoms:
raise ValueError(
f"cannot merge two trj with different numbers {self.natoms}, {trj.natoms}"
)
if safe_mode:
if not all(trj.species == self.species):
_order, new_label = species_to_order_label(trj.species)
_, old_label = species_to_order_label(self.species)
if new_label != old_label:
raise ValueError(f"cannot merge two trj with different numbers")
if order is None:
trj.reorder_atoms(_order)
for k in self.per_frame_attrs:
item = getattr(trj, k)
ori_item = getattr(self, k)
try:
mat = np.append(ori_item, item, axis=0)
except Exception as e:
raise RuntimeError("fail", k, item.shape, ori_item.shape, e)
setattr(self, k, mat)
self.copy_metadata(trj, exception=["name", "nframes", "natom", "filenames"])
self.nframes += trj.nframes
def reshape(self, key, shape):
mat = getattr(self, key)
if isinstance(mat, np.ndarray):
setattr(self, key, mat.reshape(tuple([self.nframes]) + shape))
def flatten(self):
for k in self.per_frame_attrs:
self.reshape(k, tuple([-1]))
for k in self.fixed_attrs:
self.reshape(k, tuple([-1]))
def extract_frames(self, frame_list):
trj = type(self)()
trj.copy(self)
trj.include_frames(frame_list)
trj.name = f"{self.name}_{trj.nframes}"
return trj
def reorder_atoms(self, order):
if len(order) > self.natoms:
logging.error(
f"{len(order)} order should be smaller than {self.natoms} lines"
)
raise RuntimeError()
for k in self.per_frame_attrs:
ori_item = getattr(self, k)
if len(ori_item.shape) > 1:
if ori_item.shape[1] == self.natoms:
item = np.swapaxes(ori_item, 0, 1)
item = item[order]
item = np.swapaxes(item, 0, 1)
setattr(self, k, item)
self.species = np.array(self.species)[order]
natoms = self.position.shape[1]
if natoms != self.natoms:
logging.info(f"extract_frames {self.natoms} lines to {natoms} lines")
self.natoms = natoms
self.sanity_check()
def shuffle(self):
frame_list =
|
np.random.permutation(self.nframes)
|
numpy.random.permutation
|
import math
from typing import Iterable, Union
import numpy as np
from shor.errors import CircuitError
from shor.layers import _Layer
from shor.utils.collections import flatten
QbitOrIterable = Union[int, Iterable]
class _Gate(_Layer):
"""Abstract base quantum gate class
A quantum gate operates on qbits only, and represents a pure (unitary) quantum operation.
input_length = valid length of input qbits
qbits = indices of qbits, to be used as input to gate.
Attributes
__________
input_length : int
valid length of input qbits
qbits : int
indices of qbits, to be used as input to gate
Methods
-------
symbol(self): Returns matrix symbol as lower case for provider transpiler
qbits(self):Returns qbit indices associated with applying the gate
to_gates(self): Returns the list of gate objects applied to the input qbit array. Ex: H([1,2]) will
return two Hadamard gate objects applied to qbits indexed 1 and 2
num_states(self):Returns the number of states associated with a state vector and its gate matrix
to_matrix(self): Return unitary matrix form of gate object
matrix(self): Call to_matrix which returns matrix form of gate object
invert(self): Return self
I(self): Calls invert(self)
"""
@property
def symbol(self):
return self.__class__.__name__.lower()
def __init__(self, *qbits: QbitOrIterable, **kwargs):
"""
Parameters
__________
qbits : int
qbits to which the gate is being applied
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
super().__init__(**kwargs)
self.qbits = flatten(qbits) if qbits else [0]
self.dimension = kwargs.get("dimension", 1)
assert all(map(lambda q: type(q) == int, self.qbits)), str(self.qbits)
try:
assert len(self.qbits) % self.dimension == 0
except AssertionError:
raise CircuitError(
f"The input qbits length {len(self.qbits)} is not divisible by the '{self.symbol}' "
f"gate's dimension {self.dimension}"
)
@property
def qubits(self):
"""Alias for qbits. Both refer to qbit inputs for this gate."""
return self.qbits
def to_gates(self):
"""Returns gate objects applied to provided qbit indices
Returns
-------
_Gate Object
The _Gate Objects applied to the provided qbits. Ex: H([1,2]) will return two gate objects such as
H(1) and H(2)
"""
if len(self.qbits) > self.dimension:
return [
self.__class__(self.qbits[i: i + self.dimension]) for i in range(0, len(self.qbits), self.dimension)
]
return [self]
@property
def num_states(self):
return np.power(2, self.dimension)
def to_matrix(self) -> np.ndarray:
return np.eye(self.num_states)
@property
def matrix(self):
return self.to_matrix()
def invert(self):
return self
@property
def I(self):
return self.invert()
def __invert__(self):
return self.invert()
class CNOT(_Gate):
"""
Apply the controlled X gate to control and target qbit
Inherits from gate class
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
list of length 2 containing qbit indices to which the controlled X gate is applied
dimension : int
number of qbits to which the controlled X gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "CX"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit indices apply CNOT gate. First is control
qbit and the second applies the Pauli-X gate to the target qbit.
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 2
if not qbits:
qbits = [0, 1]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2**n where n is the number of qbits
"""
return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
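# Illustrative check (not part of the class): in the basis |00>, |01>, |10>, |11>
# the CNOT matrix flips the target when the control qbit is 1, e.g. |10> -> |11>.
#   m = CNOT().to_matrix()
#   np.array_equal(m @ np.array([0, 0, 1, 0]), np.array([0, 0, 0, 1]))   # True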
class CY(_Gate):
"""
Apply the controlled Y gate to control and target qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
list of length 2 containing qbit indices controlled Y gate is applied to
dimension : int
number of qbits to which the controlled Y gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "CY"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit indices apply CY gate. First is control
qbit and the second applies the Pauli-Y gate to the target qbit.
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 2
if not qbits:
qbits = [0, 1]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2**n where n is the number of qbits
"""
return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, -1 * 1j], [0, 0, 1j, 0]])
class CSWAP(_Gate):
"""
Apply the Fredkin gate or sometimes called the CSWAP gate to three qbits
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit indices CSWAP gate is applied to: the first being a control qbit and the
SWAP gate is applied to the second and third target qbits.
dimension : int
number of qbits to which the CSWAP gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "CSWAP"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit indices to which the CSWAP gate is applied. The first is the control qbit and the SWAP gate is applied to the second and third qbits.
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 3
if not qbits:
qbits = [0, 1, 2]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2**n where n is the number of qbits
"""
cswap_matrix = np.eye(8)
cswap_matrix[:, [5, 6]] = cswap_matrix[:, [6, 5]]
return cswap_matrix
class Hadamard(_Gate):
"""
Apply the hadamard gate to a single qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit index to which the hadamard gate is applied
dimension : int
number of qbits to which the hardamard gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "H"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit index to which the hadamard gate is applied.
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 1
if not qbits:
qbits = [0]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2**n where n is the number of qbits
"""
return np.multiply(np.divide(1, np.sqrt(self.num_states)), np.array([[1, 1], [1, -1]]))
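# Illustrative check (not part of the class): the single-qbit Hadamard matrix is the
# normalized [[1, 1], [1, -1]], and passing several qbit indices expands into one
# gate object per qbit via to_gates().
#   np.allclose(Hadamard().to_matrix(), np.array([[1, 1], [1, -1]]) / np.sqrt(2))   # True
#   len(Hadamard([1, 2]).to_gates())                                                # 2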
class PauliX(_Gate):
"""
Apply the PauliX gate to a single qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit index to which the PauliX gate is applied
dimension : int
number of qbits to which the PauliX gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "X"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit index to which the PauliX gate is applied
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 1
if not qbits:
qbits = [0]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2**n where n is the number of qbits
"""
return np.array([[0, 1], [1, 0]])
class PauliY(_Gate):
"""
Apply the PauliY gate to a single qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit indices to which the PauliY gate is applied
dimension : int
number of qbits to which the PauliY gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "Y"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit index to which the PauliY gate is applied
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 1
if not qbits:
qbits = [0]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2**n where n is the number of qbits
"""
return np.array([[0, -1j], [1j, 0]])
class PauliZ(_Gate):
"""
Apply the PauliZ gate to a single qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit indices to which the PauliZ gate is applied
dimension : int
number of qbits to which the PauliZ gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "Z"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit index to which the PauliZ gate is applied
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 1
if not qbits:
qbits = [0]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2**n where n is the number of qbits
"""
return np.array([[1, 0], [0, -1]])
class QFT(_Gate):
"""
Apply the Quantum Fourier Transform gate to two qbits
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit indices to which the QFT gate is applied
dimension : int
number of qbits to which the QFT gate is applied
Methods
-------
get_nth_unity_root(self,k)
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
# TODO: add documentation of class methods
def __init__(self, *qbits, **kwargs):
if not qbits:
qbits = [0, 1]
super().__init__(*qbits, dimension=len(qbits), **kwargs)
# def to_gates(self):
# # TODO: translate this gate to base gates / CNOTs
# pass
def get_nth_unity_root(self, k):
return np.exp((2j * np.pi * k) / self.num_states)
def to_matrix(self) -> np.ndarray:
m = np.array(np.ones((self.num_states, self.num_states)), dtype="complex")
for i in range(1, self.num_states):
for j in range(i, self.num_states):
w = self.get_nth_unity_root(i * j)
m[i, j] = w
m[j, i] = w
return np.around(np.multiply(1 / np.sqrt(self.num_states), m), decimals=15)
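# Illustrative check (not part of the class): the DFT matrix built above is unitary,
# and for a single qbit it reduces to the Hadamard matrix.
#   m = QFT().to_matrix()                                    # default is two qbits -> 4x4
#   np.allclose(m @ m.conj().T, np.eye(4))                   # True (up to the rounding above)
#   np.allclose(QFT(0).to_matrix(), Hadamard().to_matrix())  # True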
class SWAP(_Gate):
"""
Apply the SWAP gate to two qbits
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit indices SWAP gate is applied to
dimension : int
number of qbits SWAP gate is applied to
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "SWAP"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit index SWAP gate is applied to
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 2
if not qbits:
qbits = [0, 1]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2**n where n is the number of qbits
"""
return np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
class Cx(_Gate):
"""
Apply the Cx gate to two qbits
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit indices Cx gate is applied to
dimension : int
number of qbits Cx gate is applied to
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "CX"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit indexes apply Cx gate. First is the control
qbit and second the target to which the X gate is applied.
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 2
if not qbits:
qbits = [0, 1]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 4x4
"""
return np.array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]])
class CCNOT(_Gate):
"""
Apply the CCNOT or CCX gate to three qbits. The first and second qbits are control qbits and
the Pauli-X gate is applied to the third qbit.
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit indices to which CCNOT gate is applied
dimension : int
number of qbits CCNOT gate is applied to
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "CCX"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit indices to which the CCNOT gate is applied. The first and second are control
qbits and the Pauli-X gate is applied to the third (target) qbit.
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 3
if not qbits:
qbits = [0, 1, 2]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 8x8
"""
return np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0],
]
)
class CRZ(_Gate):
"""
Apply the CRZ gate to two qbits. The first being the control qbit
and the Rz gate is applied to the target qbit.
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit index CRZ gate is applied to
angle : float
angle by which the CRZ gate rotates the second target qbit around the z-axis
dimension : int
number of qbits CRZ gate is applied to
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "CRZ"
def __init__(self, *qbits, angle=0, **kwargs):
"""
Parameters
__________
qbits : int
qbit indices to which the CRZ gate is applied. The first is the control
qbit and the RZ rotation is applied to the second (target) qbit.
angle : float
angle by which the CRZ gate rotates the second target qbit around the z-axis
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 2
self.angle = angle
if not qbits:
qbits = [0, 1]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 4x4
"""
return np.array(
[
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, np.exp(-1j * self.angle / 2), 0],
[0, 0, 0, np.exp(1j * self.angle / 2)],
]
)
class CH(_Gate):
"""
Apply the CH gate to two qbits. The first is the control qbit
and the Hadamard gate is applied to the second (target) qbit.
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit index CH gate is applied to
dimension : int
number of qbits CH gate is applied to
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "CH"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit indexes apply CH gate. First is control
qbit second applies the H gate to the target qbit.
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 2
if not qbits:
qbits = [0, 1]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 4x4
"""
return np.array(
[
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1 / np.sqrt(2), 1 / np.sqrt(2)],
[0, 0, 1 / np.sqrt(2), -1 / np.sqrt(2)],
]
)
class S(_Gate):
"""
Apply the S gate to a single qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit index S gate is applied to
dimension : int
number of qbits S gate is applied to
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "S"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit index S gate is applied to
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 1
if not qbits:
qbits = [0]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2x2
"""
return np.array([[1, 0], [0, 1j]])
class Sdg(_Gate):
"""
Apply the Sdg gate to a single qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit index to which the Sdg gate is applied
dimension : int
number of qbits to which the Sdg gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "Sdg"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit index Sdg gate is applied to
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 1
if not qbits:
qbits = [0]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2x2
"""
return np.array([[1, 0], [0, -1j]])
class T(_Gate):
"""
Apply the T gate to a single qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit index to which the T gate is applied
dimension : int
number of qbits to which the T gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "T"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit index to which T gate is applied
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 1
if not qbits:
qbits = [0]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2x2
"""
return np.array([[1, 0], [0, np.exp(1j * np.pi / 4)]])
class Tdg(_Gate):
"""
Apply the Tdg gate to a single qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit index to which Tdg gate is applied
dimension : int
number of qbits to which Tdg gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "Tdg"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit index to which Tdg gate is applied
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 1
if not qbits:
qbits = [0]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2x2
"""
return np.array([[1, 0], [0, np.exp(-1j * np.pi / 4)]])
class ID(_Gate):
"""
Apply the identity (ID) gate to a single qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit index to which ID gate is applied
dimension : int
number of qbits to which ID gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "I"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit index ID gate is applied to
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 1
if not qbits:
qbits = [0]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2x2
"""
return np.array([[1, 0], [0, 1]])
class U1(_Gate):
"""
Apply the U1 gate to a single qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit index U1 gate is applied to
angle : float
angle used to rotate qbit of choice around the z-axis
dimension : int
number of qbits U1 gate is applied to
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "U1"
def __init__(self, *qbits, angle=0, **kwargs):
"""
Parameters
__________
qbits : int
qbit index to which the U1 gate is applied.
angle : float
angle used to rotate qbit of choice around the z-axis
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 1
self.angle = angle
if not qbits:
qbits = [0]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2x2
"""
return np.array([[1, 0], [0, np.exp(1j * self.angle)]])
class Cz(_Gate):
"""
Apply the Cz gate to a two qbits. First is control qbit and PauliZ gate is applied to the target qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit indices to which the Cz gate is applied
dimension : int
number of qbits to which the Cz gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "CZ"
def __init__(self, *qbits, **kwargs):
"""
Parameters
__________
qbits : int
qbit indices to which the CZ gate is applied. The first is the control
qbit and the Z gate is applied to the second (target) qbit.
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 2
if not qbits:
qbits = [0, 1]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 4x4
"""
return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])
class Rx(_Gate):
"""
Apply the Rx gate to a single qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit index to which the Rx gate is applied
dimension : int
number of qbits to which Rx gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "RX"
def __init__(self, *qbits, angle=math.pi / 2, **kwargs):
"""
Parameters
__________
qbits : integer
qbit being rotated
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 1
self.angle = angle
if not qbits:
qbits = [0]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2x2
"""
return np.array(
[
[math.cos(self.angle / 2), -math.sin(self.angle / 2) * 1j],
[-math.sin(self.angle / 2) * 1j, math.cos(self.angle / 2)],
]
)
class Ry(_Gate):
"""
Apply the Ry gate to a single qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit index to which the Ry gate is applied
dimension : int
number of qbits to which the Ry gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "RY"
def __init__(self, *qbits, angle=math.pi / 2, **kwargs):
"""
Parameters
__________
qbits : integer
qbit being rotated
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 1
self.angle = angle
if not qbits:
qbits = [0]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2x2
"""
return np.array(
[
[math.cos(self.angle / 2), -math.sin(self.angle / 2)],
[math.sin(self.angle / 2), math.cos(self.angle / 2)],
]
)
class Rz(_Gate):
"""
Apply the Rz gate to a single qbit
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit index to which the Rz gate is applied
dimension : int
number of qbits to which the Rz gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "RZ"
def __init__(self, *qbits, angle, **kwargs):
"""
Parameters
__________
qbits : integer
qbit being rotated
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
self.angle = angle
kwargs["dimension"] = 1
if not qbits:
qbits = [0]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2x2
"""
return np.array(
[[np.exp(-(1 / 2) * 1j * self.angle), 0], [0, np.exp((1 / 2) * 1j * self.angle)]], dtype="complex"
)
class U3(_Gate):
"""
apply U3 gate for single qbit rotation with 3 euler angles
Attributes
__________
symbol : str
a string used to represent gate for provider transpiler
qbits : int
qbit index to which the Rz gate is applied
theta : float
first angle used to rotate single qbit
phi : float
second angle used to rotate single qbit
lam : float
third angle used to rotate single qbit
dimension : int
number of qbits to which the Rz gate is applied
Methods
-------
to_matrix(self) -> np.ndarray
Returns matrix form of gate as numpy array
"""
symbol = "U3"
def __init__(self, *qbits, theta=0, phi=0, lam=0, **kwargs):
"""
Parameters
__________
qbits : integer
qbit being rotated
theta : float
angle used to rotate single qbit
phi : float
angle used to rotate single qbit
lam : float
angle used to rotate single qbit
kwargs : dictionary containing key value arguments
keyword - dimension - the number of input qbits
"""
kwargs["dimension"] = 1
self.theta = theta
self.phi = phi
self.lam = lam
if not qbits:
qbits = [0]
super().__init__(*qbits, **kwargs)
def to_matrix(self) -> np.ndarray:
"""Returns matrix form of gate as numpy array
Returns
-------
numpy array
matrix form of gate with the size 2x2
"""
return np.array(
[
[math.cos(self.theta / 2), -np.exp(1j * self.lam) * math.sin(self.theta / 2)],
[
np.exp(1j * self.phi) * math.sin(self.theta / 2),
|
np.exp(1j * (self.phi + self.lam))
|
numpy.exp
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 27 18:18:29 2020
@author: DiviyaT
"""
import numpy as np
import pandas as pd
from scipy import io
import timeit
from sklearn.svm import LinearSVC
from sklearn.feature_selection import RFE
in_file = io.loadmat('colon.mat')
X = pd.DataFrame(in_file['X'], dtype=float)
y = pd.DataFrame(in_file['Y'])
# convert classes from whatever datatype they are to binary integers (0 and 1)
y_values = np.unique(y)
if len(y_values) > 2:
raise ValueError("target variable must be binary (found more than two classes)")
y_binary = np.array(y == y_values[0], dtype=int)
X = X.values
y = np.reshape(y_binary, -1)
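# Added illustrative sanity check (not in the original script): after the
# conversion the target vector should only contain 0s and 1s.
assert np.isin(np.unique(y), [0, 1]).all(), "target variable must be binary"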
def apply_SVM_RFE(X, y, **kwargs):
n_features = kwargs.get('n_features', 1)
step = kwargs.get('step', 1)
feature_subset = np.arange(X.shape[1])
feature_idx_elimination_order = []
for i in range(n_features, 0, -step):
X_set = X[:, feature_subset]
svc = LinearSVC(dual=False)
rfe = RFE(svc, n_features_to_select=i, step=step, verbose=1)
rfe.fit(X_set, y)
boolean_mask = rfe.get_support(indices=False)
pruned_feature_indices = feature_subset[
|
np.invert(boolean_mask)
|
numpy.invert
|
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import provider
import tf_util
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='contrastnet', help='Model name: contrastnet')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=512, help='Point Number [256/512/1024/2048] [default: 512]')
parser.add_argument('--max_epoch', type=int, default=600, help='Epoch to run [default: 600]')
parser.add_argument('--batch_size', type=int, default=40, help='Batch Size during training [default: 40]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for the momentum optimizer [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
parser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
MODEL_PATH = FLAGS.model_path
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
MAX_NUM_POINT = 2048
NUM_CLASSES = 2
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
# ModelNet40 official train/test split
TRAIN_FILES = provider.getDataFiles( \
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048_cut/train_files.txt'))
TEST_FILES = provider.getDataFiles(\
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048_cut/test_files.txt'))
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
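# For reference (illustrative, not used by the graph): with staircase=True the
# schedule above is equivalent to
#   lr = max(BASE_LEARNING_RATE * DECAY_RATE ** ((batch * BATCH_SIZE) // DECAY_STEP), 0.00001)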
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl_1, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
pointclouds_pl_2, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
print(is_training_pl)
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
# Get model and loss
pred, feature1, feature2, end_points = MODEL.get_model(pointclouds_pl_1, pointclouds_pl_2, is_training_pl, bn_decay=bn_decay)
loss = MODEL.get_loss(pred, labels_pl, end_points)
tf.summary.scalar('loss', loss)
correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
tf.summary.scalar('accuracy', accuracy)
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver(max_to_keep = 10)
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
#merged = tf.merge_all_summaries()
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
# Init variables
init = tf.global_variables_initializer()
sess.run(init, {is_training_pl: True})
ops = {'pointclouds_pl_1': pointclouds_pl_1,
'pointclouds_pl_2': pointclouds_pl_2,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch}
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
train_one_epoch(sess, ops, train_writer)
if epoch % 40 == 0 and epoch >= 120:
save_path = saver.save(sess, os.path.join(LOG_DIR, 'epoch_' + str(epoch)+'.ckpt'))
log_string("Model saved in file: %s" % save_path)
elif epoch % 10 == 0:
save_path = saver.save(sess, os.path.join(LOG_DIR, 'model.ckpt'))
log_string("Model saved in file: %s" % save_path)
def train_one_epoch(sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
# Shuffle train files
train_file_idxs = np.arange(0, len(TRAIN_FILES))
np.random.shuffle(train_file_idxs)
current_data_1 = np.empty([3*len(TRAIN_FILES), NUM_POINT, 3], dtype=float)
current_data_2 = np.empty([3*len(TRAIN_FILES), NUM_POINT, 3], dtype=float)
current_label = np.empty([3*len(TRAIN_FILES),1], dtype=int)
fn = 0
count = 0
while fn < len(TRAIN_FILES) - 1:
# log_string('----' + str(fn) + '-----')
total_current = []
a1, a2, _ = provider.loadDataFile_cut_2(TRAIN_FILES[train_file_idxs[fn]])
idx = np.random.randint(a1.shape[0], size=NUM_POINT)
a1 = a1[idx,:]
idx = np.random.randint(a2.shape[0], size=NUM_POINT)
a2 = a2[idx,:]
total_current.append(a1)
total_current.append(a2)
fn = fn + 1
b1, b2, _ = provider.loadDataFile_cut_2(TRAIN_FILES[train_file_idxs[fn]])
idx = np.random.randint(b1.shape[0], size=NUM_POINT)
b1 = b1[idx,:]
idx = np.random.randint(b2.shape[0], size=NUM_POINT)
b2 = b2[idx,:]
total_current.append(b1)
total_current.append(b2)
fn = fn + 1
pair_num = 0
for index in range(len(total_current)):
for index2 in range(index + 1, len(total_current)):
current_data_1[6*count+pair_num,:,:] = total_current[index]
current_data_2[6*count+pair_num, :,:] = total_current[index2]
if (index < 2) and (index2 >= 2):
current_label[6*count+pair_num,:] = 0
else:
current_label[6*count+pair_num,:] = 1
pair_num = pair_num + 1
count = count + 1
current_label = np.squeeze(current_label)
file_size = current_data_1.shape[0]
num_batches = file_size // BATCH_SIZE
total_correct = 0
total_seen = 0
loss_sum = 0
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
# shuffle each batch
data_1 = current_data_1[start_idx:end_idx, :, :]
data_2 = current_data_2[start_idx:end_idx, :, :]
label = current_label[start_idx:end_idx]
combine_data =
|
np.concatenate((data_1, data_2), axis=2)
|
numpy.concatenate
|
# -*- coding: utf-8 -*-
"""Radial depending sensitivities."""
import os
import glob
import numpy as np
from scipy.interpolate import UnivariateSpline as uvs
import matplotlib.pyplot as plt
from mpi4py import MPI
import welltestpy as wtp
import anaflow as ana
# plotting style
plt.style.use("ggplot")
# increase fontsize of plots, prevent type 3 fonts in pdf output
plt.rcParams.update({"font.size": 16, "pdf.fonttype": 42, "ps.fonttype": 42})
# file extension of the saved plots
file_ext = ".pdf"
# rank is the actual core-number, size is total number of cores
rank = MPI.COMM_WORLD.Get_rank()
size = MPI.COMM_WORLD.Get_size()
# paths
here = os.path.abspath(os.path.dirname(__file__))
results = os.path.normpath(
os.path.join(here, "..", "results", "01_estimate", "rad_sens")
)
def dashes(i=1, max_d=12, space=1):
"""Dashes for matplotlib."""
return i * [space, space] + [max_d - 2 * i * space, space]
def lin(xp, fp):
"""
Linear interpolation of given values as a callable function.
Parameters
----------
xp : list of float
The x values of the data points.
fp : list of float
The function values of the data points.
Returns
-------
callable
The linear interpolation.
"""
def inter(x):
return np.interp(x, xp, fp)
return inter
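# Illustrative example (values assumed, not from the study):
#   f = lin([0.0, 1.0], [0.0, 2.0])
#   f(0.5)   # 1.0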
def harm_mu(mu, var):
"""
Recalculate mu to get the exponent for the harmonic mean in log-norm.
Parameters
----------
mu : float
Mean of the log-normal distribution.
var : float
Variance of the log-normal distribution.
Returns
-------
float
Recalculated mean.
"""
var = 0 if var is None else var
return mu - 0.5 * var
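# Illustrative numeric check (values assumed): for mu = -9 and var = 2 the
# harmonic-mean exponent is mu - var / 2, so harm_mu(-9, 2) == -10.0 and the
# corresponding harmonic-mean transmissivity is np.exp(-10.0).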
def est_sens(
rad,
mu=-9,
lnS=-7,
fix=True,
dummy=False,
fix_var=None,
harm=False,
var_mean=5,
):
"""
Estimate radial depending sensitivities.
Parameters
----------
rad : float
Radial distance to calculate the sensitivity at.
mu : float, optional
Mean of the log-normal distribution. The default is -9.
lnS : float, optional
log-storativity. The default is -7.
fix : bool, optional
Whether to fix mu and lnS (only var and len_scale estimated).
The default is True.
dummy : bool, optional
Whether to use a dummy parameter. The default is False.
fix_var : float, optional
Whether to fix the variance. The default is None.
harm : bool, optional
Whether to use the harmonic mean for the reference type-curve.
Harmonic mean is calculated from fix_var, if given, or var_mean.
The default is False.
var_mean : float, optional
Variance used to derive the harmonic mean when fix_var is not given.
The default is 5.
"""
var_flag = fix_var is not None
root = results
root += "_all" if not fix else ""
root += "_dummy" if dummy else ""
root += "_fix_var" if var_flag else ""
root += "_harm" if harm else ""
# recalculate mu to get the harm-mean if wanted
harm_var = var_mean if (harm and not var_flag) else fix_var
mu = harm_mu(mu, harm_var)
val_fix = {"mu": mu, "lnS": lnS} if fix else {}
if var_flag:
val_fix["var"] = fix_var
rad = float(rad)
prate = -1
time = np.geomspace(10, 7200, 10)
drawdown = ana.theis(time, rad, np.exp(lnS), np.exp(mu), prate)
campaign = wtp.data.Campaign(name="sens-campaign")
campaign.add_well(name="well_0", radius=0, coordinates=(0.0, 0.0))
campaign.add_well(name="well_1", radius=0, coordinates=(rad, 0.0))
pumptest = wtp.data.PumpingTest("well_0", "well_0", prate)
pumptest.add_transient_obs("well_1", time, drawdown)
campaign.addtests(pumptest)
estimation = wtp.estimate.ExtTheis2D(
"est", campaign, val_fix=val_fix, generate=True
)
if dummy:
estimation.gen_setup(dummy=dummy)
estimation.sensitivity(folder=os.path.join(root, "rad_" + str(rad)))
def post_all_sens(
save=True,
smooth=None,
fix=True,
dummy=False,
fix_var=None,
harm=False,
plt_dummy=False,
typ="ST",
):
"""
Post-process sensitivity analysis.
Parameters
----------
save : bool, optional
Whether to save the plot. The default is True.
smooth : bool, optional
Whether to smooth the result. The default is None.
fix : bool, optional
Whether mu and ln(S) were fixed. The default is True.
dummy : bool, optional
        Whether a dummy parameter was used. The default is False.
fix_var : float, optional
The used fixed variance if any. The default is None.
harm : bool, optional
Whether the Theis(T_harm) solution was used as reference.
The default is False.
plt_dummy : bool, optional
Whether to plot the dummy result. The default is False.
typ : str, optional
The type of the FAST result. Either "ST" for total sensitivity,
or "S1" for first order sensitivity. The default is "ST".
"""
var_flag = fix_var is not None
root = results
root += "_all" if not fix else ""
root += "_dummy" if dummy else ""
root += "_fix_var" if var_flag else ""
root += "_harm" if harm else ""
radii_dir = glob.glob(os.path.join(root, "rad*"))
radii_dir.sort() # sorting by radii
radii = []
rad_Si = []
sig2_sen = []
corr_sen = []
mu_sen = []
lnS_sen = []
dum_sen = []
for rad_dir in radii_dir:
radii.append(float(os.path.basename(rad_dir)[4:]))
ST_files = glob.glob(os.path.join(rad_dir, "*_FAST_estimate.txt"))
ST_files.sort() # use the latest estimation
S1_files = glob.glob(os.path.join(rad_dir, "*_FAST_estimate_S1.txt"))
S1_files.sort() # use the latest estimation
ST = np.loadtxt(ST_files[-1])
S1 =
|
np.loadtxt(S1_files[-1])
|
numpy.loadtxt
|
# Copyright 2021 The ODE-LSTM Authors. All Rights Reserved.
import numpy as np
import os
import tensorflow as tf
from tqdm import tqdm
class Walker2dImitationData:
def __init__(self, seq_len):
self.seq_len = seq_len
all_files = sorted(
[
os.path.join("data/walker", d)
for d in os.listdir("data/walker")
if d.endswith(".npy")
]
)
self.rng = np.random.RandomState(891374)
np.random.RandomState(125487).shuffle(all_files)
# 15% test set, 10% validation set, the rest is for training
test_n = int(0.15 * len(all_files))
valid_n = int((0.15 + 0.1) * len(all_files))
test_files = all_files[:test_n]
valid_files = all_files[test_n:valid_n]
train_files = all_files[valid_n:]
train_x, train_t, train_y = self._load_files(train_files)
valid_x, valid_t, valid_y = self._load_files(valid_files)
test_x, test_t, test_y = self._load_files(test_files)
train_x, train_t, train_y = self.perturb_sequences(train_x, train_t, train_y)
valid_x, valid_t, valid_y = self.perturb_sequences(valid_x, valid_t, valid_y)
test_x, test_t, test_y = self.perturb_sequences(test_x, test_t, test_y)
self.train_x, self.train_times, self.train_y = self.align_sequences(
train_x, train_t, train_y
)
self.valid_x, self.valid_times, self.valid_y = self.align_sequences(
valid_x, valid_t, valid_y
)
self.test_x, self.test_times, self.test_y = self.align_sequences(
test_x, test_t, test_y
)
self.input_size = self.train_x.shape[-1]
print("train_times: ", str(self.train_times.shape))
print("train_x: ", str(self.train_x.shape))
print("train_y: ", str(self.train_y.shape))
def align_sequences(self, set_x, set_t, set_y):
times = []
x = []
y = []
for i in range(len(set_y)):
seq_x = set_x[i]
seq_t = set_t[i]
seq_y = set_y[i]
for t in range(0, seq_y.shape[0] - self.seq_len, self.seq_len // 4):
x.append(seq_x[t : t + self.seq_len])
times.append(seq_t[t : t + self.seq_len])
y.append(seq_y[t : t + self.seq_len])
return (
np.stack(x, axis=0),
np.expand_dims(np.stack(times, axis=0), axis=-1),
np.stack(y, axis=0),
)
def perturb_sequences(self, set_x, set_t, set_y):
x = []
times = []
y = []
for i in range(len(set_y)):
seq_x = set_x[i]
seq_y = set_y[i]
new_x, new_times = [], []
new_y = []
skip = 0
for t in range(seq_y.shape[0]):
skip += 1
if self.rng.rand() < 0.9:
new_x.append(seq_x[t])
new_times.append(skip)
new_y.append(seq_y[t])
skip = 0
x.append(np.stack(new_x, axis=0))
times.append(np.stack(new_times, axis=0))
y.append(np.stack(new_y, axis=0))
return x, times, y
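    # Added illustration: each frame is kept with probability 0.9, and the time
    # stored for a kept frame is the number of raw steps since the previously
    # kept frame. E.g. if frame 2 of frames 0..4 is dropped:
    #     kept x:     [x0, x1, x3, x4]
    #     kept times: [ 1,  1,  2,  1]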
def _load_files(self, files):
all_x = []
all_t = []
all_y = []
for f in files:
arr = np.load(f)
x_state = arr[:-1, :].astype(np.float32)
y = arr[1:, :].astype(np.float32)
x_times = np.ones(x_state.shape[0])
all_x.append(x_state)
all_t.append(x_times)
all_y.append(y)
print("Loaded file '{}' of length {:d}".format(f, x_state.shape[0]))
return all_x, all_t, all_y
class ETSMnistData:
def __init__(self, time_major, pad_size=256):
self.threshold = 128
self.pad_size = pad_size
if not self.load_from_cache():
self.create_dataset()
self.train_elapsed /= self.pad_size
self.test_elapsed /= self.pad_size
def load_from_cache(self):
if os.path.isfile("dataset/test_mask.npy"):
self.train_events = np.load("dataset/train_events.npy")
self.train_elapsed = np.load("dataset/train_elapsed.npy")
self.train_mask = np.load("dataset/train_mask.npy")
self.train_y = np.load("dataset/train_y.npy")
self.test_events = np.load("dataset/test_events.npy")
self.test_elapsed = np.load("dataset/test_elapsed.npy")
self.test_mask = np.load("dataset/test_mask.npy")
self.test_y = np.load("dataset/test_y.npy")
print("train_events.shape: ", str(self.train_events.shape))
print("train_elapsed.shape: ", str(self.train_elapsed.shape))
print("train_mask.shape: ", str(self.train_mask.shape))
print("train_y.shape: ", str(self.train_y.shape))
print("test_events.shape: ", str(self.test_events.shape))
print("test_elapsed.shape: ", str(self.test_elapsed.shape))
print("test_mask.shape: ", str(self.test_mask.shape))
print("test_y.shape: ", str(self.test_y.shape))
return True
return False
def transform_sample(self, x):
x = x.flatten()
events = np.zeros([self.pad_size, 1], dtype=np.float32)
elapsed = np.zeros([self.pad_size, 1], dtype=np.float32)
        mask = np.zeros([self.pad_size], dtype=bool)  # plain bool; np.bool was removed from NumPy
last_char = -1
write_index = 0
elapsed_counter = 0
for i in range(x.shape[0]):
elapsed_counter += 1
char = int(x[i] > self.threshold)
if last_char != char:
events[write_index] = char
elapsed[write_index] = elapsed_counter
mask[write_index] = True
write_index += 1
if write_index >= self.pad_size:
                    # Event buffer for this sample is full, abort
self._abort_counter += 1
break
elapsed_counter = 0
last_char = char
self._all_lenghts.append(write_index)
return events, elapsed, mask
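    # Added illustration: rows are run-length encoded on threshold crossings.
    # A binarised row [0, 0, 1, 1, 1, 0] becomes
    #     events  = [0, 1, 0, 0, ...]        (new value at each crossing)
    #     elapsed = [1, 2, 3, 0, ...]        (steps since the previous event)
    #     mask    = [True, True, True, False, ...]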
def transform_array(self, x):
events_list = []
elapsed_list = []
mask_list = []
for i in tqdm(range(x.shape[0])):
events, elapsed, mask = self.transform_sample(x[i])
events_list.append(events)
elapsed_list.append(elapsed)
mask_list.append(mask)
return (
np.stack(events_list, axis=0),
np.stack(elapsed_list, axis=0),
np.stack(mask_list, axis=0),
)
def create_dataset(self):
(train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
self._all_lenghts = []
self._abort_counter = 0
train_x = train_x.reshape([-1, 28 * 28])
test_x = test_x.reshape([-1, 28 * 28])
self.train_y = train_y
self.test_y = test_y
print("Transforming training samples")
self.train_events, self.train_elapsed, self.train_mask = self.transform_array(
train_x
)
print("Transforming test samples")
self.test_events, self.test_elapsed, self.test_mask = self.transform_array(
test_x
)
print("Average time-series length: {:0.2f}".format(np.mean(self._all_lenghts)))
print("Abort counter: ", str(self._abort_counter))
os.makedirs("dataset", exist_ok=True)
np.save("dataset/train_events.npy", self.train_events)
np.save("dataset/train_elapsed.npy", self.train_elapsed)
np.save("dataset/train_mask.npy", self.train_mask)
np.save("dataset/train_y.npy", self.train_y)
np.save("dataset/test_events.npy", self.test_events)
np.save("dataset/test_elapsed.npy", self.test_elapsed)
np.save("dataset/test_mask.npy", self.test_mask)
np.save("dataset/test_y.npy", self.test_y)
class PersonData:
class_map = {
"lying down": 0,
"lying": 0,
"sitting down": 1,
"sitting": 1,
"standing up from lying": 2,
"standing up from sitting": 2,
"standing up from sitting on the ground": 2,
"walking": 3,
"falling": 4,
"on all fours": 5,
"sitting on the ground": 6,
}
sensor_ids = {
"010-000-024-033": 0,
"010-000-030-096": 1,
"020-000-033-111": 2,
"020-000-032-221": 3,
}
def __init__(self, seq_len=32):
self.seq_len = seq_len
self.num_classes = 7
all_x, all_t, all_y = self.load_crappy_formated_csv()
all_x, all_t, all_y = self.cut_in_sequences(
all_x, all_t, all_y, seq_len=seq_len, inc=seq_len // 2
)
print("all_x.shape: ", str(all_x.shape))
print("all_t.shape: ", str(all_t.shape))
print("all_y.shape: ", str(all_y.shape))
total_seqs = all_x.shape[0]
print("Total number of sequences: {}".format(total_seqs))
permutation = np.random.RandomState(98841).permutation(total_seqs)
test_size = int(0.2 * total_seqs)
self.test_x = all_x[permutation[:test_size]]
self.test_y = all_y[permutation[:test_size]]
self.test_t = all_t[permutation[:test_size]]
self.train_x = all_x[permutation[test_size:]]
self.train_t = all_t[permutation[test_size:]]
self.train_y = all_y[permutation[test_size:]]
self.feature_size = int(self.train_x.shape[-1])
print("train_x.shape: ", str(self.train_x.shape))
print("train_t.shape: ", str(self.train_t.shape))
print("train_y.shape: ", str(self.train_y.shape))
print("Total number of train sequences: {}".format(self.train_x.shape[0]))
print("Total number of test sequences: {}".format(self.test_x.shape[0]))
def load_crappy_formated_csv(self):
all_x = []
all_y = []
all_t = []
series_x = []
series_t = []
series_y = []
last_millis = None
if not os.path.isfile("data/person/ConfLongDemo_JSI.txt"):
print("ERROR: File 'data/person/ConfLongDemo_JSI.txt' not found")
print("Please execute the command")
print("source download_dataset.sh")
import sys
sys.exit(-1)
with open("data/person/ConfLongDemo_JSI.txt", "r") as f:
current_person = "A01"
for line in f:
arr = line.split(",")
if len(arr) < 6:
break
if arr[0] != current_person:
                    # Enqueue the finished series and reset
series_x = np.stack(series_x, axis=0)
series_t = np.stack(series_t, axis=0)
series_y = np.array(series_y, dtype=np.int32)
all_x.append(series_x)
all_t.append(series_t)
all_y.append(series_y)
last_millis = None
series_x = []
series_y = []
series_t = []
millis = np.int64(arr[2]) / (100 * 1000)
# 100ms will be normalized to 1.0
millis_mapped_to_1 = 10.0
if last_millis is None:
elasped_sec = 0.05
else:
elasped_sec = float(millis - last_millis) / 1000.0
elasped = elasped_sec * 1000 / millis_mapped_to_1
last_millis = millis
current_person = arr[0]
sensor_id = self.sensor_ids[arr[1]]
label_col = self.class_map[arr[7].replace("\n", "")]
feature_col_2 = np.array(arr[4:7], dtype=np.float32)
# Last 3 entries of the feature vector contain sensor value
# First 4 entries of the feature vector contain sensor ID
feature_col_1 = np.zeros(4, dtype=np.float32)
feature_col_1[sensor_id] = 1
feature_col = np.concatenate([feature_col_1, feature_col_2])
series_x.append(feature_col)
series_t.append(elasped)
series_y.append(label_col)
return all_x, all_t, all_y
def cut_in_sequences(self, all_x, all_t, all_y, seq_len, inc=1):
sequences_x = []
sequences_t = []
sequences_y = []
for i in range(len(all_x)):
x, t, y = all_x[i], all_t[i], all_y[i]
for s in range(0, x.shape[0] - seq_len, inc):
start = s
end = start + seq_len
sequences_x.append(x[start:end])
sequences_t.append(t[start:end])
sequences_y.append(y[start:end])
return (
np.stack(sequences_x, axis=0),
np.stack(sequences_t, axis=0).reshape([-1, seq_len, 1]),
np.stack(sequences_y, axis=0),
)
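    # Added note: with the defaults (seq_len=32, inc=seq_len//2=16) consecutive
    # windows overlap by 50%; a 100-step recording yields windows starting at
    # t = 0, 16, 32, 48 and 64.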
class XORData:
def __init__(self, time_major, event_based=True, pad_size=24):
self.pad_size = pad_size
self.event_based = event_based
self._abort_counter = 0
if not self.load_from_cache():
self.create_dataset()
self.train_elapsed /= self.pad_size
self.test_elapsed /= self.pad_size
def load_from_cache(self):
if os.path.isfile("dataset/xor_test_y.npy"):
self.train_events = np.load("dataset/xor_train_events.npy")
self.train_elapsed = np.load("dataset/xor_train_elapsed.npy")
self.train_mask = np.load("dataset/xor_train_mask.npy")
self.train_y = np.load("dataset/xor_train_y.npy")
self.test_events = np.load("dataset/xor_test_events.npy")
self.test_elapsed = np.load("dataset/xor_test_elapsed.npy")
self.test_mask = np.load("dataset/xor_test_mask.npy")
self.test_y = np.load("dataset/xor_test_y.npy")
print("train_events.shape: ", str(self.train_events.shape))
print("train_elapsed.shape: ", str(self.train_elapsed.shape))
print("train_mask.shape: ", str(self.train_mask.shape))
print("train_y.shape: ", str(self.train_y.shape))
print("test_events.shape: ", str(self.test_events.shape))
print("test_elapsed.shape: ", str(self.test_elapsed.shape))
print("test_mask.shape: ", str(self.test_mask.shape))
print("test_y.shape: ", str(self.test_y.shape))
return True
return False
def create_event_based_sample(self, rng):
label = 0
events = np.zeros([self.pad_size, 1], dtype=np.float32)
elapsed = np.zeros([self.pad_size, 1], dtype=np.float32)
        mask = np.zeros([self.pad_size], dtype=bool)  # plain bool; np.bool was removed from NumPy
last_char = -1
write_index = 0
elapsed_counter = 0
length = rng.randint(low=2, high=self.pad_size)
for i in range(length):
elapsed_counter += 1
char = int(rng.randint(low=0, high=2))
label += char
if last_char != char:
events[write_index] = char
elapsed[write_index] = elapsed_counter
mask[write_index] = True
write_index += 1
elapsed_counter = 0
if write_index >= self.pad_size - 1:
                    # Event buffer for this sample is full, abort
self._abort_counter += 1
break
last_char = char
if elapsed_counter > 0:
events[write_index] = char
elapsed[write_index] = elapsed_counter
mask[write_index] = True
label = label % 2
return events, elapsed, mask, label
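    # Added illustration: the label is the parity of the raw bit stream, while
    # events/elapsed store only the transitions. E.g. bits 1, 1, 0, 1 give
    #     label   = (1 + 1 + 0 + 1) % 2 = 1
    #     events  = [1, 0, 1, ...],  elapsed = [1, 2, 1, ...]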
def create_dense_sample(self, rng):
label = 0
events = np.zeros([self.pad_size, 1], dtype=np.float32)
elapsed = np.zeros([self.pad_size, 1], dtype=np.float32)
        mask = np.zeros([self.pad_size], dtype=bool)  # plain bool; np.bool was removed from NumPy
last_char = -1
write_index = 0
elapsed_counter = 0
length = rng.randint(low=2, high=self.pad_size)
for i in range(length):
elapsed_counter += 1
char = int(rng.randint(low=0, high=2))
label += char
events[write_index] = char
elapsed[write_index] = elapsed_counter
mask[write_index] = True
write_index += 1
elapsed_counter = 0
label = label % 2
label2 = int(np.sum(events)) % 2
assert label == label2
return events, elapsed, mask, label
def create_set(self, size, seed):
rng = np.random.RandomState(seed)
events_list = []
elapsed_list = []
mask_list = []
label_list = []
for i in tqdm(range(size)):
if self.event_based:
events, elapsed, mask, label = self.create_event_based_sample(rng)
else:
events, elapsed, mask, label = self.create_dense_sample(rng)
events_list.append(events)
elapsed_list.append(elapsed)
mask_list.append(mask)
label_list.append(label)
return (
np.stack(events_list, axis=0),
np.stack(elapsed_list, axis=0),
np.stack(mask_list, axis=0),
np.stack(label_list, axis=0),
)
def create_dataset(self):
print("Transforming training samples")
(
self.train_events,
self.train_elapsed,
self.train_mask,
self.train_y,
) = self.create_set(100000, 1234984)
print("Transforming test samples")
(
self.test_events,
self.test_elapsed,
self.test_mask,
self.test_y,
) = self.create_set(10000, 48736)
print("train_events.shape: ", str(self.train_events.shape))
print("train_elapsed.shape: ", str(self.train_elapsed.shape))
print("train_mask.shape: ", str(self.train_mask.shape))
print("train_y.shape: ", str(self.train_y.shape))
print("test_events.shape: ", str(self.test_events.shape))
print("test_elapsed.shape: ", str(self.test_elapsed.shape))
print("test_mask.shape: ", str(self.test_mask.shape))
print("test_y.shape: ", str(self.test_y.shape))
print("Abort counter: ", str(self._abort_counter))
os.makedirs("dataset", exist_ok=True)
np.save("dataset/xor_train_events.npy", self.train_events)
np.save("dataset/xor_train_elapsed.npy", self.train_elapsed)
np.save("dataset/xor_train_mask.npy", self.train_mask)
np.save("dataset/xor_train_y.npy", self.train_y)
np.save("dataset/xor_test_events.npy", self.test_events)
np.save("dataset/xor_test_elapsed.npy", self.test_elapsed)
|
np.save("dataset/xor_test_mask.npy", self.test_mask)
|
numpy.save
|
# import the libraries
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import lstsq
def parse_cmdline_args():
"""Parse command line arguments, define the parameters"""
try: # try converting directly to values
return [
int(sys.argv[1]) if len(sys.argv) >= 2 else 25,
int(sys.argv[2]) if len(sys.argv) >= 3 else 25,
int(sys.argv[3]) if len(sys.argv) >= 4 else 8,
int(sys.argv[4]) if len(sys.argv) >= 5 else 1500,
]
except ValueError: # otherwise, parse keyword arguments
parser = argparse.ArgumentParser(description="Resistor simulation")
parser.add_argument(
"-x", "--Nx", type=int, default=25, help="size along x axis"
)
parser.add_argument(
"-y", "--Ny", type=int, default=25, help="size along y axis"
)
parser.add_argument(
"-r", "--radius", type=int, default=8, help="radius of central lead"
)
parser.add_argument(
"-n", "--Niter", type=int, default=1500, help="number of iterations to do"
)
args = parser.parse_args()
return args.Nx, args.Ny, args.radius, args.Niter
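# Added usage note (the script name is hypothetical): both call styles work,
#     python resistor.py 30 30 10 2000                # positional Nx Ny radius Niter
#     python resistor.py -x 30 -y 30 -r 10 -n 2000    # keyword arguments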
def contruct_phi(Nx, Ny, radius):
"""Construct and plot phi, return phi, area inside radius and x,y coordinates"""
phi = np.zeros((Ny, Nx)) # allocate the potential array
X, Y = np.meshgrid(np.linspace(-0.5, 0.5, Nx), np.linspace(-0.5, 0.5, Ny))
# scaled radius = 0.35 for default args, 5% margin for floating point errors
ii = np.where(X ** 2 + Y ** 2 <= (1.05 * radius / (min(Nx, Ny) - 1)) ** 2)
phi[ii] = 1.0 # initialize the potential array
return phi, ii, X, Y
def contour_plot(phi, ii, X, Y):
"""Obtain a contour plot of the potential"""
plt.figure(figsize=(8, 8))
plt.title("Contour Plot of the Potential", fontsize=14)
plt.xlabel("$x$", fontsize=14)
plt.ylabel("$y$", fontsize=14)
plt.clabel(plt.contour(X, Y, phi))
plt.scatter(X[0, ii[1]], Y[ii[0], 0], color="r", label="$V=1$")
plt.legend()
plt.grid()
plt.show()
def iterate(phi, ii, Niter):
"""Perform Niter no. of iterations on phi, return phi and errors"""
errors =
|
np.zeros(Niter)
|
numpy.zeros
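# Added sketch (an assumption, not the original continuation): the body of
# iterate() for this Laplace/resistor problem is typically a Jacobi update that
# averages the four neighbours, re-asserts the V = 1 lead and records the
# largest change per step as the error.
def iterate_sketch(phi, ii, Niter):
    errors = np.zeros(Niter)
    for k in range(Niter):
        oldphi = phi.copy()
        phi[1:-1, 1:-1] = 0.25 * (oldphi[1:-1, :-2] + oldphi[1:-1, 2:]
                                  + oldphi[:-2, 1:-1] + oldphi[2:, 1:-1])
        phi[ii] = 1.0  # keep the central lead fixed at V = 1
        errors[k] = np.abs(phi - oldphi).max()
    return phi, errors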
|
# Copyright (c) 2016 by <NAME> and the other collaborators on GitHub at
# https://github.com/rmjarvis/Piff All rights reserved.
#
# Piff is free software: Redistribution and use in source and binary forms
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import piff
import os
import galsim
import fitsio
import yaml
import subprocess
from piff_test_helper import get_script_name, timer
@timer
def test_twodstats():
"""Make sure we can execute and print a readout of the plot
"""
if __name__ == '__main__':
logger = piff.config.setup_logger(2)
else:
logger = None
model = piff.Gaussian(fastfit=True)
interp = piff.Polynomial(order=1) # should find that order=1 is better
# create background model
stars, true_model = generate_starlist(100)
psf = piff.SimplePSF(model, interp)
psf.fit(stars, None, None)
# check the coeffs of sigma and g2, which are actually linear fits
# skip g1 since it is actually a 2d parabola
# factor of 0.263 is to account for going from pixel xy to wcs uv
np.testing.assert_almost_equal(psf.interp.coeffs[0].flatten(),
np.array([0.4, 0, 1. / (0.263 * 2048), 0]), decimal=4)
np.testing.assert_almost_equal(psf.interp.coeffs[2].flatten(),
np.array([-0.1 * 1000 / 2048, 0, 0.1 / (0.263 * 2048), 0]),
decimal=4)
stats = piff.TwoDHistStats(number_bins_u=5, number_bins_v=5, reducing_function='np.mean')
stats.compute(psf, stars, logger=logger)
# check the twodhists
# get the average value in the bin
u_i = 3
v_i = 3
icen = stats.twodhists['u'][v_i, u_i] / 0.263
jcen = stats.twodhists['v'][v_i, u_i] / 0.263
print('icen = ',icen)
print('jcen = ',jcen)
icenter = 1000
jcenter = 2000
# the average value in the bin should match up with the model for the average coordinates
sigma, g1, g2 = psf_model(icen, jcen, icenter, jcenter)
sigma_average = stats.twodhists['T'][v_i, u_i]
g1_average = stats.twodhists['g1'][v_i, u_i]
g2_average = stats.twodhists['g2'][v_i, u_i]
# assert equal to 4th decimal
print('sigma, g1, g2 = ',[sigma,g1,g2])
print('av sigma, g1, g2 = ',[sigma_average,g1_average,g2_average])
np.testing.assert_almost_equal([sigma, g1, g2], [sigma_average, g1_average, g2_average],
decimal=2)
# Test the plotting and writing
twodstats_file = os.path.join('output','twodstats.pdf')
stats.write(twodstats_file)
# repeat for whisker
stats = piff.WhiskerStats(number_bins_u=21, number_bins_v=21, reducing_function='np.mean')
stats.compute(psf, stars)
# Test the plotting and writing
twodstats_file = os.path.join('output','whiskerstats.pdf')
stats.write(twodstats_file)
@timer
def test_shift_cmap():
from matplotlib import cm
# test vmax and vmin center issues
vmin = -1
vmax = 8
center = 2
# color map vmin > center
cmap = piff.TwoDHistStats._shift_cmap(vmin, vmax, vmin - 1)
assert cmap == cm.Reds
# color map vmax < center
cmap = piff.TwoDHistStats._shift_cmap(vmin, vmax, vmax + 1)
assert cmap == cm.Blues_r
# test without center
cmap = piff.TwoDHistStats._shift_cmap(vmin, vmax)
midpoint = (0 - vmin) * 1. / (vmax - vmin)
unshifted_cmap = cm.RdBu_r
# check segment data
    # NOTE: because of interpolation, cmap(midpoint) does not have to equal
    # unshifted_cmap(0.5)
for val, color in zip(unshifted_cmap(0.5), ['red', 'green', 'blue', 'alpha']):
assert midpoint == cmap._segmentdata[color][128][0]
assert val == cmap._segmentdata[color][128][1]
assert val == cmap._segmentdata[color][128][2]
# but edge values are the same
assert cmap(0.) == unshifted_cmap(0.)
assert cmap(1.) == unshifted_cmap(1.)
# test with center
cmap = piff.TwoDHistStats._shift_cmap(vmin, vmax, center)
midpoint = (center - vmin) * 1. / (vmax - vmin)
unshifted_cmap = cm.RdBu_r
for val, color in zip(unshifted_cmap(0.5), ['red', 'green', 'blue', 'alpha']):
assert midpoint == cmap._segmentdata[color][128][0]
assert val == cmap._segmentdata[color][128][1]
assert val == cmap._segmentdata[color][128][2]
assert cmap(0.) == unshifted_cmap(0.)
assert cmap(1.) == unshifted_cmap(1.)
# what if vmax < vmin?
cmap = piff.TwoDHistStats._shift_cmap(vmax, vmin, center)
midpoint = 1. - (center - vmax) * 1. / (vmin - vmax)
unshifted_cmap = cm.RdBu_r
for val, color in zip(unshifted_cmap(0.5), ['red', 'green', 'blue', 'alpha']):
assert midpoint == cmap._segmentdata[color][128][0]
assert val == cmap._segmentdata[color][128][1]
assert val == cmap._segmentdata[color][128][2]
assert cmap(0.) == unshifted_cmap(1.)
assert cmap(1.) == unshifted_cmap(0.)
def make_star(icen=500, jcen=700, ccdnum=28,
sigma=1, g1=0, g2=0,
pixel_to_focal=False,
properties={},
fit_kwargs={}):
properties['ccdnum'] = ccdnum
# setting scale is crucial
stardata = piff.Star.makeTarget(x=icen, y=jcen, properties=properties,
scale=0.263)
# apply Gaussian sigma, g1, g2
params = np.array([sigma, g1, g2])
starfit = piff.StarFit(params, **fit_kwargs)
star = piff.Star(stardata.data, starfit)
return star
def psf_model(icens, jcens, icenter, jcenter):
sigmas = icens * (2. - 1.) / 2048. + 0.4
g1s = ((jcens - jcenter) / 4096.) ** 2 * -0.2
g2s = (icens - icenter) * 0.1 / 2048.
return sigmas, g1s, g2s
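# Added note: psf_model makes sigma linear in icen and g2 linear in
# (icen - icenter), while g1 is quadratic in (jcen - jcenter); this is why
# test_twodstats above only checks the two linear coefficient sets.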
def generate_starlist(n_samples=500):
# create n_samples images from the 63 ccds and pixel coordinates
np_rng = np.random.RandomState(1234)
icens = np_rng.randint(100, 2048, n_samples)
jcens = np_rng.randint(100, 4096, n_samples)
ccdnums = np_rng.randint(1, 63, n_samples)
icenter = 1000
jcenter = 2000
# throw out any icens and jcens that are within 400 pixels of the center
conds = (np.abs(icens - icenter) > 400) | (np.abs(jcens - jcenter) > 400)
icens = icens[conds]
jcens = jcens[conds]
ccdnums = ccdnums[conds]
sigmas, g1s, g2s = psf_model(icens, jcens, icenter, jcenter)
# throw in a 2d polynomial function for sigma g1 and g2
# all sigma > 0, all g1 < 0, and g2 straddles.
star_list = [make_star(icen, jcen, ccdnum, sigma, g1, g2)
for icen, jcen, ccdnum, sigma, g1, g2
in zip(icens, jcens, ccdnums, sigmas, g1s, g2s)]
# load up model and draw the stars
model = piff.Gaussian(fastfit=True)
star_list = [model.draw(star) for star in star_list]
star_list = [model.initialize(star) for star in star_list]
star_list = [model.fit(star) for star in star_list]
return star_list, model
@timer
def setup():
"""Build an input image and catalog used by a few tests below.
"""
# Make the image (copied from test_single_image in test_simple.py)
image = galsim.Image(2048, 2048, scale=0.26)
# Where to put the stars.
x_list = [ 123.12, 345.98, 567.25, 1094.94, 924.15, 1532.74, 1743.11, 888.39, 1033.29, 1409.31 ]
y_list = [ 345.43, 567.45, 1094.32, 924.29, 1532.92, 1743.83, 888.83, 1033.19, 1409.20, 123.11 ]
# Draw a Gaussian PSF at each location on the image.
sigma = 1.3
g1 = 0.23
g2 = -0.17
dx = 0.31 # in pixels
dy = -0.32
flux = 123.45
psf = galsim.Gaussian(sigma=sigma).shear(g1=g1, g2=g2) * flux
for x, y in zip(x_list, y_list):
bounds = galsim.BoundsI(int(x-31), int(x+32), int(y-31), int(y+32))
offset = galsim.PositionD( x-int(x)-0.5 + dx, y-int(y)-0.5 + dy)
psf.drawImage(image=image[bounds], method='no_pixel', offset=offset)
image.addNoise(galsim.GaussianNoise(rng=galsim.BaseDeviate(1234), sigma=1e-6))
# Write out the image to a file
image_file = os.path.join('output','test_stats_image.fits')
image.write(image_file)
# Write out the catalog to a file
dtype = [ ('x','f8'), ('y','f8') ]
data = np.empty(len(x_list), dtype=dtype)
data['x'] = x_list
data['y'] = y_list
cat_file = os.path.join('output','test_stats_cat.fits')
fitsio.write(cat_file, data, clobber=True)
@timer
def test_twodstats_config():
"""Test running stats through a config file.
"""
if __name__ == '__main__':
logger = piff.config.setup_logger(verbose=2)
else:
logger = piff.config.setup_logger(log_file='output/test_twodstats_config.log')
image_file = os.path.join('output','test_stats_image.fits')
cat_file = os.path.join('output','test_stats_cat.fits')
psf_file = os.path.join('output','test_twodstats.fits')
twodhist_file = os.path.join('output','test_twodhiststats.pdf')
twodhist_std_file = os.path.join('output','test_twodhiststats_std.pdf')
config = {
'input' : {
'image_file_name' : image_file,
'cat_file_name' : cat_file,
'stamp_size' : 48
},
'psf' : {
'model' : { 'type' : 'Gaussian',
'fastfit': True,
'include_pixel': False },
'interp' : { 'type' : 'Mean' },
},
'output' : {
'file_name' : psf_file,
'stats' : [
{
'type': 'TwoDHist',
'file_name': twodhist_file,
'number_bins_u': 3,
'number_bins_v': 3,
},
{
'type': 'TwoDHist',
'file_name': twodhist_std_file,
'reducing_function': 'np.std',
'number_bins_u': 3,
'number_bins_v': 3,
},
]
}
}
piff.piffify(config, logger)
assert os.path.isfile(twodhist_file)
assert os.path.isfile(twodhist_std_file)
# repeat with plotify function
os.remove(twodhist_file)
os.remove(twodhist_std_file)
piff.plotify(config, logger)
assert os.path.isfile(twodhist_file)
assert os.path.isfile(twodhist_std_file)
@timer
def test_rhostats_config():
"""Test running stats through a config file.
"""
if __name__ == '__main__':
logger = piff.config.setup_logger(verbose=2)
else:
logger = piff.config.setup_logger(log_file='output/test_rhostats_config.log')
image_file = os.path.join('output','test_stats_image.fits')
cat_file = os.path.join('output','test_stats_cat.fits')
psf_file = os.path.join('output','test_rhostats.fits')
rho_file = os.path.join('output','test_rhostats.pdf')
config = {
'input' : {
'image_file_name' : image_file,
'cat_file_name' : cat_file,
'stamp_size' : 48
},
'psf' : {
'model' : { 'type' : 'Gaussian',
'fastfit': True,
'include_pixel': False },
'interp' : { 'type' : 'Mean' },
},
'output' : {
'file_name' : psf_file,
'stats' : { # Note: stats doesn't have to be a list.
'type': 'Rho',
'file_name': rho_file
}
},
}
piff.piffify(config, logger)
assert os.path.isfile(rho_file)
# repeat with plotify function
os.remove(rho_file)
piff.plotify(config, logger)
assert os.path.isfile(rho_file)
# Test rho statistics directly.
min_sep = 1
max_sep = 100
bin_size = 0.1
psf = piff.read(psf_file)
orig_stars, wcs, pointing = piff.Input.process(config['input'], logger)
stats = piff.RhoStats(min_sep=min_sep, max_sep=max_sep, bin_size=bin_size)
stats.compute(psf, orig_stars)
rhos = [stats.rho1, stats.rho2, stats.rho3, stats.rho4, stats.rho5]
for rho in rhos:
# Test the range of separations
radius = np.exp(rho.logr)
# last bin can be one bigger than max_sep
np.testing.assert_array_less(radius, np.exp(np.log(max_sep) + bin_size))
np.testing.assert_array_less(min_sep, radius)
np.testing.assert_array_almost_equal(np.diff(rho.logr), bin_size, decimal=5)
# Test that the max absolute value of each rho isn't crazy
np.testing.assert_array_less(np.abs(rho.xip), 1)
        # Check that each rho isn't precisely zero. This means the sum of abs > 0
np.testing.assert_array_less(0, np.sum(np.abs(rho.xip)))
# Test using the piffify executable
os.remove(rho_file)
config['verbose'] = 0
with open('rho.yaml','w') as f:
f.write(yaml.dump(config, default_flow_style=False))
piffify_exe = get_script_name('piffify')
p = subprocess.Popen( [piffify_exe, 'rho.yaml'] )
p.communicate()
assert os.path.isfile(rho_file)
# Test using the plotify executable
os.remove(rho_file)
plotify_exe = get_script_name('plotify')
p = subprocess.Popen( [plotify_exe, 'rho.yaml'] )
p.communicate()
assert os.path.isfile(rho_file)
# test running plotify with dir in config, with no logger, and with a modules specification.
# (all to improve test coverage)
config['output']['dir'] = '.'
config['modules'] = [ 'custom_wcs' ]
os.remove(rho_file)
piff.plotify(config)
assert os.path.isfile(rho_file)
@timer
def test_shapestats_config():
"""Test running stats through a config file.
"""
if __name__ == '__main__':
logger = piff.config.setup_logger(verbose=2)
else:
logger = piff.config.setup_logger(log_file='output/test_shapestats_config.log')
image_file = os.path.join('output','test_stats_image.fits')
cat_file = os.path.join('output','test_stats_cat.fits')
psf_file = os.path.join('output','test_shapestats.fits')
shape_file = os.path.join('output','test_shapestats.pdf')
config = {
'input' : {
'image_file_name' : image_file,
'cat_file_name' : cat_file,
'stamp_size' : 48
},
'psf' : {
'model' : { 'type' : 'Gaussian',
'fastfit': True,
'include_pixel': False },
'interp' : { 'type' : 'Mean' },
},
'output' : {
'file_name' : psf_file,
'stats' : [
{
'type': 'ShapeHistograms',
'file_name': shape_file
},
]
},
}
piff.piffify(config, logger)
assert os.path.isfile(shape_file)
# repeat with plotify function
os.remove(shape_file)
piff.plotify(config, logger)
assert os.path.isfile(shape_file)
# Test ShapeHistogramStats directly
psf = piff.read(psf_file)
shapeStats = piff.ShapeHistogramsStats()
orig_stars, wcs, pointing = piff.Input.process(config['input'], logger)
shapeStats.compute(psf, orig_stars)
# test their characteristics
sigma = 1.3 # (copied from setup())
g1 = 0.23
g2 = -0.17
np.testing.assert_array_almost_equal(sigma, shapeStats.T, decimal=4)
np.testing.assert_array_almost_equal(sigma, shapeStats.T_model, decimal=3)
np.testing.assert_array_almost_equal(g1, shapeStats.g1, decimal=4)
np.testing.assert_array_almost_equal(g1, shapeStats.g1_model, decimal=3)
np.testing.assert_array_almost_equal(g2, shapeStats.g2, decimal=4)
np.testing.assert_array_almost_equal(g2, shapeStats.g2_model, decimal=3)
@timer
def test_starstats_config():
"""Test running stats through a config file.
"""
if __name__ == '__main__':
logger = piff.config.setup_logger(verbose=2)
else:
logger = piff.config.setup_logger(log_file='output/test_starstats_config.log')
image_file = os.path.join('output','test_stats_image.fits')
cat_file = os.path.join('output','test_stats_cat.fits')
psf_file = os.path.join('output','test_starstats.fits')
star_file = os.path.join('output', 'test_starstats.pdf')
star_noadjust_file = os.path.join('output', 'test_starstats_noadjust.pdf')
config = {
'input' : {
'image_file_name' : image_file,
'cat_file_name' : cat_file,
'stamp_size' : 48
},
'psf' : {
'model' : { 'type' : 'Gaussian',
'fastfit': True,
'include_pixel': False },
'interp' : { 'type' : 'Mean' },
},
'output' : {
'file_name' : psf_file,
'stats' : [
{
'type': 'Star',
'file_name': star_file,
'number_plot': 5,
'adjust_stars': True,
}
]
}
}
piff.piffify(config, logger)
assert os.path.isfile(star_file)
# repeat with plotify function
os.remove(star_file)
piff.plotify(config, logger)
assert os.path.isfile(star_file)
# check default number_plot
psf = piff.read(psf_file)
starStats = piff.StarStats()
orig_stars, wcs, pointing = piff.Input.process(config['input'], logger)
starStats.compute(psf, orig_stars)
assert starStats.number_plot == len(starStats.stars)
assert starStats.number_plot == len(starStats.models)
assert starStats.number_plot == len(starStats.indices)
np.testing.assert_array_equal(starStats.stars[2].image.array,
orig_stars[starStats.indices[2]].image.array)
# check number_plot = 6
starStats = piff.StarStats(number_plot=6)
starStats.compute(psf, orig_stars)
assert len(starStats.stars) == 6
# check number_plot >> len(stars)
starStats = piff.StarStats(number_plot=1000000)
starStats.compute(psf, orig_stars)
assert len(starStats.stars) == len(orig_stars)
# if use all stars, no randomness
np.testing.assert_array_equal(starStats.stars[3].image.array, orig_stars[3].image.array)
np.testing.assert_array_equal(starStats.indices, np.arange(len(orig_stars)))
# check number_plot = 0
starStats = piff.StarStats(number_plot=0)
starStats.compute(psf, orig_stars)
assert len(starStats.stars) == len(orig_stars)
# if use all stars, no randomness
np.testing.assert_array_equal(starStats.stars[3].image.array, orig_stars[3].image.array)
np.testing.assert_array_equal(starStats.indices, np.arange(len(orig_stars)))
# rerun with adjust stars and see if it did the right thing
# first with starstats == False
starStats = piff.StarStats(number_plot=0, adjust_stars=False)
starStats.compute(psf, orig_stars, logger=logger)
fluxs_noadjust = np.array([s.fit.flux for s in starStats.stars])
ds_noadjust = np.array([s.fit.center for s in starStats.stars])
    # check that the fluxes are all 1 (no adjustment applied)
|
np.testing.assert_array_equal(fluxs_noadjust, 1)
|
numpy.testing.assert_array_equal
|
#!/usr/bin/env python
# coding: utf-8
def read_usv(adir_usv, iusv):
import xarray as xr
import numpy as np
filename_usv_list = ['pmel_2015_sd126-ALL-1_min-v1.nc',
'pmel_2015_sd128-ALL-1_min-v1.nc',
'pmel_2016_sd126-ALL-1_min-v1.nc',
'pmel_2016_sd128-ALL-1_min-v1.nc',
'arctic_2019_sd1033-NRT-1_min-v1.nc',
'arctic_2019_sd1034-NRT-1_min-v1.nc',
'arctic_2019_sd1035-NRT-1_min-v1.nc',
'arctic_2019_sd1036-NRT-1_min-v1.nc',
'arctic_2019_sd1037-NRT-1_min-v1.nc',
'saildrone-gen_5-antarctica_circumnavigation_2019-sd1020-20190119T040000-20190803T043000-1440_minutes-v1.1564857794963.nc',
'wcoast_2018_sd1024-ALL-1_min-v1.nc',
'wcoast_2018_sd1025-ALL-1_min-v1.nc',
'wcoast_2018_sd1026-ALL-1_min-v1.nc',
'wcoast_2018_sd1027-ALL-1_min-v1.nc',
'wcoast_2018_sd1028-ALL-1_min-v1.nc']
name_usv_list = ['pmel_2015_sd126', 'pmel_2015_sd128', 'pmel_2016_sd126', 'pmel_2016_sd128',
'arctic2019_1033', 'arctic2019_1034', 'arctic2019_1035', 'arctic2019_1036', 'arctic2019_1037',
'antarctic2019','wcoast1025','wcoast1026','wcoast1027','wcoast1028','wcoast1029']
filename_usv = adir_usv + filename_usv_list[iusv]
print('FILEIN:', filename_usv)
ds_usv = xr.open_dataset(filename_usv)
ds_usv.close()
# NEED TO FIND OUT IF wind_speed is to/from wind_direction ?
    if (iusv == 0 or iusv == 1):  # pmel 2015 files
ds_usv = ds_usv.rename(
{'temp_air_mean': 'TEMP_AIR_MEAN', 'rh_mean': 'RH_MEAN', 'baro_pres_mean': 'BARO_PRES_MEAN',
'sal_mean': 'SAL_MEAN', 'temp_ctd_mean': 'TEMP_CTD_MEAN', 'temp_o2_mean': 'TEMP_O2_MEAN',
'chlor_mean': 'CHLOR_MEAN', 'gust_wnd_mean': 'GUST_WND_MEAN', 'temp_ctd_stddev': 'TEMP_CTD_STDDEV'})
tem_att = ds_usv.wind_speed_mean.attrs
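        # 1 knot = 0.51444 m/s, so the next line converts wind speed to m s-1
        # (the units attribute is updated accordingly below)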
ds_usv['wind_speed_mean'] = ds_usv.wind_speed_mean * .51444
ds_usv.wind_speed_mean.attrs = tem_att
ds_usv.wind_speed_mean.attrs['units'] = 'm s-1'
uwnd = ds_usv.wind_speed_mean * np.cos(np.deg2rad(ds_usv.wind_direction_mean))
vwnd = ds_usv.wind_speed_mean * np.sin(
|
np.deg2rad(ds_usv.wind_direction_mean)
|
numpy.deg2rad
|
from unittest import TestCase
from tempfile import TemporaryDirectory
from pathlib import Path
from giant.camera_models import PinholeModel, OwenModel, BrownModel, OpenCVModel, save, load
import numpy as np
import giant.rotations as at
import lxml.etree as etree
class TestPinholeModel(TestCase):
def setUp(self):
self.Class = PinholeModel
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
model = self.Class(kx=1, ky=2, px=4, py=5, focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
estimation_parameters=['focal_length', 'px'])
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 0, 4], [0, 2, 5]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['focal_length', 'px'])
def test_estimation_parameters(self):
model = self.Class()
model.estimation_parameters = 'kx'
self.assertEqual(model.estimation_parameters, ['kx'])
model.estimate_multiple_misalignments = False
model.estimation_parameters = ['px', 'py', 'Multiple misalignments']
self.assertEqual(model.estimation_parameters, ['px', 'py', 'multiple misalignments'])
self.assertTrue(model.estimate_multiple_misalignments)
def test_kx(self):
model = self.Class(intrinsic_matrix=np.array([[1, 0, 0], [0, 0, 0]]))
self.assertEqual(model.kx, 1)
model.kx = 100
self.assertEqual(model.kx, 100)
self.assertEqual(model.intrinsic_matrix[0, 0], 100)
def test_ky(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 3, 0]]))
self.assertEqual(model.ky, 3)
model.ky = 100
self.assertEqual(model.ky, 100)
self.assertEqual(model.intrinsic_matrix[1, 1], 100)
def test_px(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 20], [0, 3, 0]]))
self.assertEqual(model.px, 20)
model.px = 100
self.assertEqual(model.px, 100)
self.assertEqual(model.intrinsic_matrix[0, 2], 100)
def test_py(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 0, 10]]))
self.assertEqual(model.py, 10)
model.py = 100
self.assertEqual(model.py, 100)
self.assertEqual(model.intrinsic_matrix[1, 2], 100)
def test_a1(self):
model = self.Class(temperature_coefficients=np.array([10, 0, 0]))
self.assertEqual(model.a1, 10)
model.a1 = 100
self.assertEqual(model.a1, 100)
self.assertEqual(model.temperature_coefficients[0], 100)
def test_a2(self):
model = self.Class(temperature_coefficients=np.array([0, 10, 0]))
self.assertEqual(model.a2, 10)
model.a2 = 100
self.assertEqual(model.a2, 100)
self.assertEqual(model.temperature_coefficients[1], 100)
def test_a3(self):
model = self.Class(temperature_coefficients=np.array([0, 0, 10]))
self.assertEqual(model.a3, 10)
model.a3 = 100
self.assertEqual(model.a3, 100)
self.assertEqual(model.temperature_coefficients[2], 100)
def test_intrinsic_matrix_inv(self):
model = self.Class(kx=5, ky=10, px=100, py=-5)
np.testing.assert_array_almost_equal(model.intrinsic_matrix @ np.vstack([model.intrinsic_matrix_inv,
[0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
np.testing.assert_array_almost_equal(model.intrinsic_matrix_inv @ np.vstack([model.intrinsic_matrix,
[0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
def test_get_temperature_scale(self):
model = self.Class(temperature_coefficients=[1, 2, 3.])
self.assertEqual(model.get_temperature_scale(1), 7)
np.testing.assert_array_equal(model.get_temperature_scale([1, 2]), [7, 35])
np.testing.assert_array_equal(model.get_temperature_scale([-1, 2.]), [-1, 35])
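        # Added note: these values are consistent with a cubic temperature scale
        #     scale(T) = 1 + a1*T + a2*T**2 + a3*T**3
        # e.g. with (a1, a2, a3) = (1, 2, 3): scale(1) = 7 and scale(2) = 35.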
def test_apply_distortion(self):
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
model = self.Class()
for inp in inputs:
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, inp)
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5, a1=1, a2=2, a3=3)
with self.subTest(misalignment=None):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=1):
for point in points:
gnom, _, pix = model.get_projections(point, temperature=1)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(1)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=-10.5):
for point in points:
gnom, _, pix = model.get_projections(point, temperature=-10.5)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(-10.5)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[0] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[1] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
point_new = rot_mat @ point
gnom, _, pix = model.get_projections(point, image=0)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
gnom, _, pix = model.get_projections(point, image=1)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
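    # Added summary of the truth values used above (taken from this test body):
    #     gnomic = focal_length * point[:2] / point[2], optionally scaled by the
    #              temperature polynomial and/or rotated by the misalignment
    #     pixel  = intrinsic_matrix[:, :2] @ gnomic + intrinsic_matrix[:, 2]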
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5, a1=-1e-3, a2=1e-6, a3=-7e-8)
with self.subTest(misalignment=None):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=1):
for point in points:
pix = model.project_onto_image(point, temperature=1)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(1)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=-10.5):
for point in points:
pix = model.project_onto_image(point, temperature=-10.5)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(-10.5)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[0] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[1] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
point_new = rot_mat @ point
pix = model.project_onto_image(point, image=0)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
pix = model.project_onto_image(point, image=1)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
def test_compute_pixel_jacobian(self):
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40,
"px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_pixel_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dgnomic(self):
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
return np.array([(cam_loc_pert_x_f -cam_loc_pert_x_b)/(2*delta),
(cam_loc_pert_y_f -cam_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for gnom in input.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test__compute_dgnomic_ddist_gnomic(self):
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
# perform the fpa
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# subtract off the residual distortion from the gnomic guess
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for dist_gnom in input.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test_compute_unit_vector_jacobian(self):
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_unit_vector_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dmisalignment(self):
def num_deriv(loc, dtheta, delta=1e-10) -> np.ndarray:
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [delta, 0, 0]).squeeze()
point_pert_x_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [0, delta, 0]).squeeze()
point_pert_y_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [0, 0, delta]).squeeze()
point_pert_z_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [delta, 0, 0]).squeeze()
point_pert_x_b = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [0, delta, 0]).squeeze()
point_pert_y_b = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [0, 0, delta]).squeeze()
point_pert_z_b = mis_pert @ loc
return np.array([(point_pert_x_f - point_pert_x_b) / (2 * delta),
(point_pert_y_f - point_pert_y_b) / (2 * delta),
(point_pert_z_f - point_pert_z_b) / (2 * delta)]).T
inputs = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [np.sqrt(3), np.sqrt(3), np.sqrt(3)],
[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-np.sqrt(3), -np.sqrt(3), -np.sqrt(3)],
[1, 0, 100], [0, 0.5, 1]]
misalignment = [[1e-8, 0, 0], [0, 1e-8, 0], [0, 0, 1e-8], [1e-9, 1e-9, 1e-9],
[-1e-8, 0, 0], [0, -1e-8, 0], [0, 0, -1e-8], [-1e-9, -1e-9, -1e-9],
[1e-9, 2.3e-9, -0.5e-9]]
for mis in misalignment:
with self.subTest(misalignment=mis):
for inp in inputs:
num = num_deriv(inp, mis)
# noinspection PyTypeChecker
ana = self.Class._compute_dcamera_point_dmisalignment(inp)
np.testing.assert_allclose(num, ana, atol=1e-10, rtol=1e-4)
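# Verify the pixel/distorted-gnomic Jacobian: perturb the distorted gnomic location, apply the
# temperature scale and intrinsic matrix, and compare the central difference to the analytic result.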
def test__compute_dpixel_ddistorted_gnomic(self):
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 1.5, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 1.5, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5, 'a1': 1.5, 'a2': 1.5, 'a3': 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for temp in temperatures:
with self.subTest(temp=temp, **intrins_coef):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
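# Verify the gnomic/camera-point Jacobian by perturbing each component of the camera-frame point
# and central-differencing the pinhole projection returned by get_projections.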
def test__compute_dgnomic_dcamera_point(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0, 0]
gnom_pert_x_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, delta, 0]
gnom_pert_y_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, 0, delta]
gnom_pert_z_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [delta, 0, 0]
gnom_pert_x_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, delta, 0]
gnom_pert_y_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, 0, delta]
gnom_pert_z_b = cmodel.get_projections(loc_pert)[0]
return np.array([(gnom_pert_x_f - gnom_pert_x_b) / (2 * delta),
(gnom_pert_y_f - gnom_pert_y_b) / (2 * delta),
(gnom_pert_z_f - gnom_pert_z_b) / (2 * delta)]).T
intrins_coefs = [{"focal_length": 1},
{"focal_length": 2.5},
{"focal_length": 1000},
{"focal_length": 0.1}]
inputs = [[0, 0, 1], [0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [0.5, 1e-14, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dgnomic_dcamera_point(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-9, rtol=1e-5)
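# Verify the gnomic/focal-length partial by perturbing focal_length on copies of the model and
# central-differencing the projected gnomic location.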
def test__compute_dgnomic_dfocal_length(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
gnom_pert_f = model_pert.get_projections(loc)[0]
model_pert = cmodel.copy()
model_pert.focal_length -= delta
gnom_pert_b = model_pert.get_projections(loc)[0]
# noinspection PyTypeChecker
return np.asarray((gnom_pert_f - gnom_pert_b) / (2 * delta))
intrins_coefs = [{"focal_length": 1},
{"focal_length": 2.5},
{"focal_length": 1000},
{"focal_length": 0.1}]
inputs = [[0, 0, 1], [0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dgnomic_dfocal_length(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10, rtol=1e-5)
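# Verify the pixel/intrinsic Jacobian (kx, ky, px, py) by perturbing each intrinsic term on a copy
# of the model and central-differencing the intrinsic-matrix mapping.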
def test__compute_dpixel_dintrinsic(self):
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dpixel_dintrinsic(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10)
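# Verify the pixel/temperature-coefficient Jacobian (a1, a2, a3) by perturbing each coefficient,
# rescaling the gnomic location with get_temperature_scale, and central-differencing.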
def test__compute_dpixel_dtemperature_coeffs(self):
def num_deriv(loc, cmodel, delta=1e-6, temperature=0) -> np.ndarray:
loc = np.array(loc)
model_pert = cmodel.copy()
model_pert.a1 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a1_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a2 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a2_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a3 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a3_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a1 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a1_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a2 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a2_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a3 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a3_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_a1_f - pix_pert_a1_b) / (2 * delta),
(pix_pert_a2_f - pix_pert_a2_b) / (2 * delta),
(pix_pert_a3_f - pix_pert_a3_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 1.5, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 1.5, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5, 'a1': 1.5, 'a2': 1.5, 'a3': 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temperatures = [0, 1, -1, -10.5, 10.5]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for temp in temperatures:
with self.subTest(temp=temp, **intrins_coef):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_dtemperature_coeffs(inp, temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
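# Verify a single Jacobian row from _get_jacobian_row against central differences taken through
# project_onto_image for every estimated parameter (focal length, intrinsics, temperature
# coefficients, and the per-image misalignment block, zero-padded for earlier images).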
def test_get_jacobian_row(self):
def num_deriv(loc, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
delta_misalignment = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_misalignment
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_misalignment
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_misalignment
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_misalignment
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_misalignment
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_misalignment
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_misalignment * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_misalignment * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_misalignment * 2)]).T
# TODO: investigate why this fails with slightly larger misalignments and temperature coefficients
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40, "px": 4005.23, "py": 4005.23,
"a1": 1e-4, "a2": 2e-7, "a3": 3e-8,
"misalignment": [[2e-15, -1.2e-14, 5e-16], [-1e-14, 2e-14, -1e-15]]}
inputs = [[0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [[10], [-22], [1200.23]]]
temperatures = [0, -1, 1, -10.5, 10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for inp in inputs:
for temp in temperatures:
with self.subTest(temp=temp, inp=inp):
num = num_deriv(inp, model, delta=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 0, 1, temperature=temp)
np.testing.assert_allclose(ana, num, rtol=1e-2, atol=1e-10)
num = num_deriv(inp, model, delta=1, image=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 1, 2, temperature=temp)
np.testing.assert_allclose(ana, num, atol=1e-10, rtol=1e-2)
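# Verify the full calibration Jacobian from compute_jacobian over multiple images, both without and
# with the a priori identity block that is appended when use_a_priori is True, against stacked
# per-point central differences.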
def test_compute_jacobian(self):
def num_deriv(loc, cmodel, delta=1e-8, image=0, nimages=1, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
delta_misalignment = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_misalignment
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_misalignment
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_misalignment
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_misalignment
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_misalignment
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_misalignment
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_misalignment * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_misalignment * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_misalignment * 2),
np.zeros(((nimages - image - 1) * 3, 2))]).T
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40, "px": 4005.23, "py": 4005.23,
"a1": 1e-5, "a2": 1e-6, "a3": 1e-7,
"misalignment": [[0, 0, 1e-15], [0, 2e-15, 0], [3e-15, 0, 0]]}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1000]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param, estimation_parameters=['intrinsic',
'temperature dependence',
'multiple misalignments'])
for temp in temperatures:
with self.subTest(temp=temp):
model.use_a_priori = False
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1, image=ind, nimages=numim, temperature=temp))
np.testing.assert_allclose(jac_ana, np.vstack(jac_num), rtol=1e-2, atol=1e-10)
model.use_a_priori = True
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1, image=ind, nimages=numim, temperature=temp))
jac_num = np.vstack(jac_num)
jac_num = np.pad(jac_num, [(0, jac_num.shape[1]), (0, 0)], 'constant', constant_values=0)
jac_num[-jac_num.shape[1]:] = np.eye(jac_num.shape[1])
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-2, atol=1e-10)
def test_remove_jacobian_columns(self):
jac = np.arange(30).reshape(1, -1)
model = self.Class()
for est_param, vals in model.element_dict.items():
model.estimation_parameters = [est_param]
expected = jac[0, vals]
np.testing.assert_array_equal(model._remove_jacobian_columns(jac), [expected])
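# Verify apply_update distributes an update vector to the scalar parameters using the element_dict
# indices and converts the trailing triplets into per-image misalignment rotation vectors.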
def test_apply_update(self):
model_param = {"focal_length": 0, "kx": 0, "ky": 0,
"px": 0, "py": 0, "a1": 0, "a2": 0, "a3": 0,
"misalignment": [[0, 0, 0], [0, 0, 0]]}
model = self.Class(**model_param, estimation_parameters=['intrinsic',
'temperature dependence',
'multiple misalignments'])
update_vec = np.arange(14)
model.apply_update(update_vec)
keys = list(model_param.keys())
keys.remove('misalignment')
for key in keys:
self.assertEqual(getattr(model, key), update_vec[model.element_dict[key][0]])
for ind, vec in enumerate(update_vec[8:].reshape(-1, 3)):
np.testing.assert_array_almost_equal(at.Rotation(vec).q, at.Rotation(model.misalignment[ind]).q)
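# Round-trip check: distort the gnomic points, apply the temperature scale and intrinsic matrix to
# build pixels, then confirm pixels_to_gnomic recovers the original gnomic locations.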
def test_pixels_to_gnomic(self):
gnomic = [[1, 0], [0, 1], [-1, 0], [0, -1],
[0.5, 0], [0, 0.5], [-0.5, 0], [0, -0.5],
[0.5, 0.5], [-0.5, -0.5], [0.5, -0.5], [-0.5, 0.5],
[[1, 0, 0.5], [0, 1.5, -0.5]]]
model = self.Class(kx=2000, ky=-3000.2, px=1025, py=937.567,
a1=1e-3, a2=2e-6, a3=-5.5e-8)
temperatures = [0, 1, -1, 10.5, -10.5]
for gnoms in gnomic:
for temp in temperatures:
with self.subTest(gnoms=gnoms, temp=temp):
dis_gnoms = np.asarray(model.apply_distortion(gnoms)).astype(float)
dis_gnoms *= model.get_temperature_scale(temp)
pixels = ((model.intrinsic_matrix[:, :2] @ dis_gnoms).T + model.intrinsic_matrix[:, 2]).T
gnoms_solved = model.pixels_to_gnomic(pixels, temperature=temp)
np.testing.assert_allclose(gnoms_solved, gnoms)
def test_undistort_pixels(self):
intrins_param = {"kx": 3000, "ky": 4000, "px": 4005.23, 'py': 2000.33, 'a1': 1e-6, 'a2': 1e-5, 'a3': 2e-5}
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
model = self.Class(**intrins_param)
temperatures = [0, 1, -1, 10.5, -10.5]
for gnom in pinhole:
gnom = np.asarray(gnom).astype(float)
for temp in temperatures:
with self.subTest(gnom=gnom, temp=temp):
mm_dist = model.apply_distortion(np.array(gnom))
temp_scale = model.get_temperature_scale(temp)
mm_dist *= temp_scale
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
pix_undist = model.undistort_pixels(pix_dist, temperature=temp)
gnom *= temp_scale
pix_pinhole = ((model.intrinsic_matrix[:, :2] @ gnom).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_allclose(pix_undist, pix_pinhole, atol=1e-13)
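# Round-trip check: project camera-frame vectors to pixels, convert back with pixels_to_unit, and
# compare against the normalized input vectors for each image and temperature.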
def test_pixels_to_unit(self):
intrins_param = {"focal_length": 32.7, "kx": 3000, "ky": 4000,
"px": 4005.23, 'py': 2000.33, 'a1': 1e-5, 'a2': -1e-10, 'a3': 2e-4,
'misalignment': [[1e-10, 2e-13, -3e-12], [4e-8, -5.3e-9, 9e-15]]}
camera_vecs = [[0, 0, 1], [0.01, 0, 1], [-0.01, 0, 1], [0, 0.01, 1], [0, -0.01, 1], [0.01, 0.01, 1],
[-0.01, -0.01, 1], [[0.01, -0.01], [-0.01, 0.01], [1, 1]]]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**intrins_param)
# TODO: consider adjusting so this isn't needed
model.estimate_multiple_misalignments = True
for vec in camera_vecs:
for image in [0, 1]:
for temp in temperatures:
with self.subTest(vec=vec, image=image, temp=temp):
pixel_loc = model.project_onto_image(vec, image=image, temperature=temp)
unit_vec = model.pixels_to_unit(pixel_loc, image=image, temperature=temp)
unit_true = np.array(vec).astype(np.float64)
unit_true /= np.linalg.norm(unit_true, axis=0, keepdims=True)
np.testing.assert_allclose(unit_vec, unit_true, atol=1e-13)
def test_overwrite(self):
model1 = self.Class(field_of_view=10, intrinsic_matrix=np.array([[1, 0, 3], [0, 5, 6]]), focal_length=60,
misalignment=[[1, 2, 3], [4, 5, 6]], use_a_priori=False,
estimation_parameters=['multiple misalignments'])
model2 = self.Class(field_of_view=20, intrinsic_matrix=np.array([[11, 0, 13], [0, 15, 16]]), focal_length=160,
misalignment=[[11, 12, 13], [14, 15, 16]], use_a_priori=True,
estimation_parameters=['single misalignment'])
modeltest = model1.copy()
modeltest.overwrite(model2)
self.assertEqual(model2.field_of_view, modeltest.field_of_view)
self.assertEqual(model2.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model2.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model2.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model2.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model2.estimation_parameters, modeltest.estimation_parameters)
modeltest = model2.copy()
modeltest.overwrite(model1)
self.assertEqual(model1.field_of_view, modeltest.field_of_view)
self.assertEqual(model1.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model1.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model1.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model1.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model1.estimation_parameters, modeltest.estimation_parameters)
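# Round-trip check: undistort a set of pixels, re-distort the result with distort_pixels, and
# confirm the original pixel locations are recovered at each temperature.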
def test_distort_pixels(self):
model = self.Class(kx=1000, ky=-950.5, px=4500, py=139.32, a1=1e-3, a2=1e-4, a3=1e-5)
pixels = [[0, 1], [1, 0], [-1, 0], [0, -1], [9000., 200.2],
[[4500, 100, 10.98], [0, 139.23, 200.3]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for pix in pixels:
for temp in temperatures:
with self.subTest(pix=pix, temp=temp):
undist_pix = model.undistort_pixels(pix, temperature=temp)
dist_pix = model.distort_pixels(undist_pix, temperature=temp)
np.testing.assert_allclose(dist_pix, pix)
def test_distortion_map(self):
model = self.Class(kx=100, ky=-985.234, px=1000, py=1095)
rows, cols, dist = model.distortion_map((2000, 250), step=10)
# noinspection PyTypeChecker
np.testing.assert_allclose(dist, 0, atol=1e-10)
rl, cl = np.arange(0, 2000, 10), np.arange(0, 250, 10)
rs, cs = np.meshgrid(rl, cl, indexing='ij')
np.testing.assert_array_equal(rows, rs)
np.testing.assert_array_equal(cols, cs)
def test_undistort_image(self):
# not sure how best to do this test...
pass
def test_copy(self):
model = self.Class()
model_copy = model.copy()
model.kx = 1000
model.ky = 999
model.px = 100
model.py = -20
model.a1 = 5
model.a2 = 6
model.a3 = 7
model._focal_length = 11231
model.field_of_view = 1231231
model.use_a_priori = True
model.estimation_parameters = ['a1', 'kx', 'ky']
model.estimate_multiple_misalignments = True
model.misalignment = [1231241, 123124, .12]
self.assertNotEqual(model.kx, model_copy.kx)
self.assertNotEqual(model.ky, model_copy.ky)
self.assertNotEqual(model.px, model_copy.px)
self.assertNotEqual(model.py, model_copy.py)
self.assertNotEqual(model.a1, model_copy.a1)
self.assertNotEqual(model.a2, model_copy.a2)
self.assertNotEqual(model.a3, model_copy.a3)
self.assertNotEqual(model.focal_length, model_copy.focal_length)
self.assertNotEqual(model.field_of_view, model_copy.field_of_view)
self.assertNotEqual(model.use_a_priori, model_copy.use_a_priori)
self.assertNotEqual(model.estimate_multiple_misalignments, model_copy.estimate_multiple_misalignments)
self.assertNotEqual(model.estimation_parameters, model_copy.estimation_parameters)
self.assertTrue((model.misalignment != model_copy.misalignment).all())
def test_to_from_elem(self):
element = etree.Element(self.Class.__name__)
model = self.Class(focal_length=20, field_of_view=5, use_a_priori=True,
misalignment=[1, 2, 3], kx=2, ky=200, px=50, py=300,
a1=37, a2=1, a3=-1230,
estimation_parameters=['a1', 'multiple misalignments'], n_rows=20, n_cols=30)
model_copy = model.copy()
with self.subTest(misalignment=True):
element = model.to_elem(element, misalignment=True)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
self.assertEqual(model, model_new)
with self.subTest(misalignment=False):
element = model.to_elem(element, misalignment=False)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
model.estimation_parameters[-1] = 'single misalignment'
model.estimate_multiple_misalignments = False
model.misalignment = np.zeros(3)
self.assertEqual(model, model_new)
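# Illustrative only: a compact sketch of the central-difference pattern that the num_deriv helpers
# in these tests implement by hand for each parameter. The helper below is hypothetical (it is not
# used by the test suite) and assumes numpy is available as np at module scope.
def _central_difference_jacobian(fun, x, delta=1e-6):
    """Approximate the Jacobian of ``fun`` at ``x`` one column at a time via central differences."""
    x = np.asarray(x, dtype=float)
    columns = []
    for index in range(x.size):
        step = np.zeros_like(x)
        step.flat[index] = delta
        # difference the forward and backward perturbations for this input component
        columns.append((np.asarray(fun(x + step), dtype=float) -
                        np.asarray(fun(x - step), dtype=float)) / (2 * delta))
    return np.stack(columns, axis=-1)
# example (hypothetical): _central_difference_jacobian(model.apply_distortion, np.array([0.5, -0.25]))


# TestOwenModel inherits the pinhole tests above and overrides the cases that change with the Owen
# distortion coefficients and the kxy/kyx intrinsic terms.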
class TestOwenModel(TestPinholeModel):
def setUp(self):
self.Class = OwenModel
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
distortion_coefficients=np.array([1, 2, 3, 4, 5, 6]),
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 7))
model = self.Class(kx=1, ky=2, px=4, py=5, focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)], kxy=80, kyx=90,
estimation_parameters=['focal_length', 'px'], n_rows=500, n_cols=600,
e1=1, radial2=2, pinwheel2=3, e4=4, tangential_x=6, e5=5)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 80, 4], [90, 2, 5]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['focal_length', 'px'])
self.assertEqual(model.n_rows, 500)
self.assertEqual(model.n_cols, 600)
np.testing.assert_array_equal(model.distortion_coefficients, [2, 4, 5, 6, 1, 3])
def test_kxy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.kxy, 1)
model.kxy = 100
self.assertEqual(model.kxy, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_kyx(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [3, 0, 0]]))
self.assertEqual(model.kyx, 3)
model.kyx = 100
self.assertEqual(model.kyx, 100)
self.assertEqual(model.intrinsic_matrix[1, 0], 100)
def test_e1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0]))
self.assertEqual(model.e1, 1)
model.e1 = 100
self.assertEqual(model.e1, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_e2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0, 0]))
self.assertEqual(model.e2, 1)
model.e2 = 100
self.assertEqual(model.e2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_e3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1]))
self.assertEqual(model.e3, 1)
model.e3 = 100
self.assertEqual(model.e3, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_e4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0, 0]))
self.assertEqual(model.e4, 1)
model.e4 = 100
self.assertEqual(model.e4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_e5(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0, 0]))
self.assertEqual(model.e5, 1)
model.e5 = 100
self.assertEqual(model.e5, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_e6(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0]))
self.assertEqual(model.e6, 1)
model.e6 = 100
self.assertEqual(model.e6, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_pinwheel1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0]))
self.assertEqual(model.pinwheel1, 1)
model.pinwheel1 = 100
self.assertEqual(model.pinwheel1, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_radial2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0, 0]))
self.assertEqual(model.radial2, 1)
model.radial2 = 100
self.assertEqual(model.radial2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_pinwheel2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1]))
self.assertEqual(model.pinwheel2, 1)
model.pinwheel2 = 100
self.assertEqual(model.pinwheel2, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_radial4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0, 0]))
self.assertEqual(model.radial4, 1)
model.radial4 = 100
self.assertEqual(model.radial4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_tangential_y(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0, 0]))
self.assertEqual(model.tangential_y, 1)
model.tangential_y = 100
self.assertEqual(model.tangential_y, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_tangential_x(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0]))
self.assertEqual(model.tangential_x, 1)
model.tangential_x = 100
self.assertEqual(model.tangential_x, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
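# Exercise apply_distortion one coefficient at a time against hand-computed solutions for points on
# and off the axes (radial2, radial4, tangential, and pinwheel terms).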
def test_apply_distortion(self):
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
solus = [[[0, 0], [2.5, 0], [-2.5, 0], [1.5 + 1.5 ** 4, 0], [-(1.5 + 1.5 ** 4), 0],
[[(1.5 + 1.5 ** 4)], [0]], [[(1.5 + 1.5 ** 4), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 4)], [0, -(1.5 + 1.5 ** 4)], [[0], [(1.5 + 1.5 ** 4)]],
[[0, 0], [(1.5 + 1.5 ** 4), -2.5]], [1 + 2 * 1.5, 1 + 2 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 6), 0], [-(1.5 + 1.5 ** 6), 0],
[[(1.5 + 1.5 ** 6)], [0]], [[(1.5 + 1.5 ** 6), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 6)], [0, -(1.5 + 1.5 ** 6)], [[0], [(1.5 + 1.5 ** 6)]],
[[0, 0], [(1.5 + 1.5 ** 6), -2.5]], [1 + 4 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [2.5, 0], [0.5, 0], [(1.5 + 1.5 ** 3), 0], [-1.5 + 1.5 ** 3, 0],
[[(1.5 + 1.5 ** 3)], [0]], [[(1.5 + 1.5 ** 3), 0.5], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [2.5, 2.5]],
[[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 2.5], [0, 0.5], [0, 1.5 + 1.5 ** 3], [0, -1.5 + 1.5 ** 3], [[0], [1.5 + 1.5 ** 3]],
[[0, 0], [1.5 + 1.5 ** 3, 0.5]], [2.5, 2.5]],
[[0, 0], [1, 1.5], [-1, -1.5], [1.5, 1.5 ** 3], [-1.5, -1.5 ** 3],
[[1.5], [1.5 ** 3]], [[1.5, -1], [1.5 ** 3, -1.5]],
[-1.5, 1], [1.5, -1], [-1.5 ** 3, 1.5], [1.5 ** 3, -1.5], [[-1.5 ** 3], [1.5]],
[[-1.5 ** 3, 1.5], [1.5, -1]],
[1 - np.sqrt(2) * 1.5, 1 + np.sqrt(2) * 1.5]],
[[0, 0], [1, 1.5], [-1, -1.5], [1.5, 1.5 ** 5], [-1.5, -1.5 ** 5],
[[1.5], [1.5 ** 5]], [[1.5, -1], [1.5 ** 5, -1.5]],
[-1.5, 1], [1.5, -1], [-1.5 ** 5, 1.5], [1.5 ** 5, -1.5], [[-1.5 ** 5], [1.5]],
[[-1.5 ** 5, 1.5], [1.5, -1]],
[1 - 2 * np.sqrt(2) * 1.5, 1 + 2 * np.sqrt(2) * 1.5]]]
for dist, sols in zip(dist_coefs, solus):
with self.subTest(**dist):
model = self.Class(**dist)
for inp, solu in zip(inputs, sols):
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, solu)
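# Verify get_projections returns consistent pinhole, distorted, and pixel locations, including
# temperature scaling and single/multiple misalignment rotations of the input points.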
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, a1=1e-1, a2=1e-6, a3=-3e-7)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
pin, dist, pix = model.get_projections(point, temperature=temp)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
dist_true *= model.get_temperature_scale(temp)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pin, pin_true)
np.testing.assert_array_equal(dist, dist_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = -model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
pin_true[0] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
pin_true[1] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point, image=0)
pin_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
pin, dist, pix = model.get_projections(point, image=1)
pin_true = -model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, a1=1, a2=2, a3=-3)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
_, __, pix = model.get_projections(point, temperature=temp)
pix_proj = model.project_onto_image(point, temperature=temp)
np.testing.assert_array_equal(pix, pix_proj)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
_, __, pix = model.get_projections(point, image=0)
pix_proj = model.project_onto_image(point, image=0)
np.testing.assert_array_equal(pix, pix_proj)
_, __, pix = model.get_projections(point, image=1)
pix_proj = model.project_onto_image(point, image=1)
np.testing.assert_array_equal(pix, pix_proj)
def test_compute_pixel_jacobian(self):
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_pixel_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-6)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dgnomic(self):
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
return np.array([(cam_loc_pert_x_f - cam_loc_pert_x_b)/(2*delta),
(cam_loc_pert_y_f - cam_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for gnom in input.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test__compute_dgnomic_ddist_gnomic(self):
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
# perform the fixed-point iteration to invert the distortion model
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# update the guess: remove the current distortion estimate from the target distorted location
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for dist_gnom in input.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test_compute_unit_vector_jacobian(self):
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_unit_vector_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
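# Verify the distortion/gnomic Jacobian for each Owen distortion coefficient by central-differencing
# the additive distortion (apply_distortion minus the identity) with respect to the gnomic location.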
def test__compute_ddistortion_dgnomic(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
dist_pert_x_f = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) + [0, delta]
dist_pert_y_f = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) - [delta, 0]
dist_pert_x_b = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) - [0, delta]
dist_pert_y_b = cmodel.apply_distortion(loc_pert) - loc_pert
return np.array(
[(dist_pert_x_f - dist_pert_x_b) / (2 * delta), (dist_pert_y_f - dist_pert_y_b) / (2 * delta)]).T
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5},
{"e1": -1.5, "e2": -1.5, "e3": -1.5, "e4": -1.5, "e5": -1.5, "e6": -1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r3 = r ** 3
r4 = r ** 4
num = num_deriv(inp, model)
ana = model._compute_ddistortion_dgnomic(np.array(inp), r, r2, r3, r4)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dpixel_ddistorted_gnomic(self):
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 1.5, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 1.5, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 1.5, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 1.5, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 1.5},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 1.5, "a2": 0, "a3": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 0, "a2": 1.5, "a3": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 0, "a2": 0, "a3": 1.5},
{"kx": 1.5, "kxy": 1.5, "ky": 1.5, "kyx": 1.5, "px": 1.5, "py": 1.5,
"a1": 1.5, "a2": 1.5, "a3": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temps = [0, 1, -1, 10.5, -10.5]
for temp in temps:
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef, temp=temp):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dpixel_dintrinsic(self):
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_kxy_f - pix_pert_kxy_b) / (2 * delta),
(pix_pert_kyx_f - pix_pert_kyx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 1.5, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 1.5, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 1.5, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 1.5, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 1.5},
{"kx": 1.5, "kxy": 1.5, "ky": 1.5, "kyx": 1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dpixel_dintrinsic(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_ddistorted_gnomic_ddistortion(self):
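        # Check the analytic derivative of the distorted gnomic location with respect to the
        # distortion coefficients against central differences of apply_distortion.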
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.radial2 += delta
loc_pert_r2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial4 += delta
loc_pert_r4_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_y += delta
loc_pert_ty_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_x += delta
loc_pert_tx_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
loc_pert_p1_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
loc_pert_p2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial2 -= delta
loc_pert_r2_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial4 -= delta
loc_pert_r4_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
loc_pert_ty_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
loc_pert_tx_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
loc_pert_p1_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
loc_pert_p2_b = model_pert.apply_distortion(loc)
return np.array([(loc_pert_r2_f - loc_pert_r2_b) / (2 * delta),
(loc_pert_r4_f - loc_pert_r4_b) / (2 * delta),
(loc_pert_ty_f - loc_pert_ty_b) / (2 * delta),
(loc_pert_tx_f - loc_pert_tx_b) / (2 * delta),
(loc_pert_p1_f - loc_pert_p1_b) / (2 * delta),
(loc_pert_p2_f - loc_pert_p2_b) / (2 * delta)]).T
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r3 = r ** 3
r4 = r ** 4
num = num_deriv(inp, model)
ana = model._compute_ddistorted_gnomic_ddistortion(np.array(inp), r, r2, r3, r4)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test_get_jacobian_row(self):
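        # Compare a single analytic Jacobian row against central differences over every
        # estimated parameter: focal length, intrinsic terms, distortion coefficients,
        # temperature coefficients (a1-a3), and the per-image misalignment vector.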
def num_deriv(loc, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 += delta
pix_pert_r2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 += delta
pix_pert_r4_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y += delta
pix_pert_ty_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x += delta
pix_pert_tx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 -= delta
pix_pert_r2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 -= delta
pix_pert_r4_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
pix_pert_ty_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
pix_pert_tx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
delta_m = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_m
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_m
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_m
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_m
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_m
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_m
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_kxy_f - pix_pert_kxy_b) / (delta * 2),
(pix_pert_kyx_f - pix_pert_kyx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_r2_f - pix_pert_r2_b) / (delta * 2),
(pix_pert_r4_f - pix_pert_r4_b) / (delta * 2),
(pix_pert_ty_f - pix_pert_ty_b) / (delta * 2),
(pix_pert_tx_f - pix_pert_tx_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_m * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_m * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_m * 2)]).T
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [[0.1, 0, 1], [0, 0.1, 1], [0.1, 0.1, 1], [-0.1, 0, 1], [0, -0.1, 1], [-0.1, -0.1, 1],
[5, 10, 1000.23], [[1], [2], [1200.23]]]
temps = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temps:
for inp in inputs:
with self.subTest(temp=temp, inp=inp):
num = num_deriv(inp, model, delta=1e-3, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 0, 1, temperature=temp)
np.testing.assert_allclose(ana, num, rtol=1e-3, atol=1e-10)
num = num_deriv(inp, model, delta=1e-3, image=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 1, 2, temperature=temp)
np.testing.assert_allclose(ana, num, atol=1e-10, rtol=1e-3)
def test_compute_jacobian(self):
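        # Build the full multi-image Jacobian numerically (one num_deriv call per observation)
        # and compare it against compute_jacobian, both without and with the a priori identity
        # block that is appended when use_a_priori is True.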
def num_deriv(loc, cmodel, delta=1e-8, image=0, nimages=1, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 += delta
pix_pert_r2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 += delta
pix_pert_r4_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y += delta
pix_pert_ty_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x += delta
pix_pert_tx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 -= delta
pix_pert_r2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 -= delta
pix_pert_r4_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
pix_pert_ty_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
pix_pert_tx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_kxy_f - pix_pert_kxy_b) / (delta * 2),
(pix_pert_kyx_f - pix_pert_kyx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_r2_f - pix_pert_r2_b) / (delta * 2),
(pix_pert_r4_f - pix_pert_r4_b) / (delta * 2),
(pix_pert_ty_f - pix_pert_ty_b) / (delta * 2),
(pix_pert_tx_f - pix_pert_tx_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta * 2),
np.zeros(((nimages - image - 1) * 3, 2))]).T
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5, [1, -10, 10]]
model = self.Class(**model_param, estimation_parameters=['intrinsic', 'temperature dependence',
'multiple misalignments'])
for temp in temperatures:
with self.subTest(temp=temp):
model.use_a_priori = False
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
if isinstance(temp, list):
templ = temp[ind]
else:
templ = temp
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1e-4, image=ind, nimages=numim, temperature=templ))
np.testing.assert_allclose(jac_ana, np.vstack(jac_num), rtol=1e-3, atol=1e-10)
model.use_a_priori = True
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
if isinstance(temp, list):
templ = temp[ind]
else:
templ = temp
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1e-4, image=ind, nimages=numim, temperature=templ))
jac_num = np.vstack(jac_num)
jac_num = np.pad(jac_num, [(0, jac_num.shape[1]), (0, 0)], 'constant', constant_values=0)
jac_num[-jac_num.shape[1]:] = np.eye(jac_num.shape[1])
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test_apply_update(self):
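        # Apply a known update vector and confirm each scalar parameter lands in the slot
        # given by element_dict, while the trailing elements update the misalignment rotations.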
model_param = {"focal_length": 0, "radial2": 0, "radial4": 0, "tangential_x": 0,
"tangential_y": 0, "pinwheel1": 0, "pinwheel2": 0, "kx": 0, "ky": 0,
"kxy": 0, "kyx": 0, "px": 0,
"misalignment": [[0, 0, 0], [0, 0, 0]],
"a1": 0, "a2": 0, "a3": 0}
model = self.Class(**model_param, estimation_parameters=['intrinsic', "temperature dependence",
'multiple misalignments'])
update_vec = np.arange(22)
model.apply_update(update_vec)
keys = list(model_param.keys())
keys.remove('misalignment')
for key in keys:
self.assertEqual(getattr(model, key), update_vec[model.element_dict[key][0]])
for ind, vec in enumerate(update_vec[16:].reshape(-1, 3)):
np.testing.assert_array_almost_equal(at.Rotation(vec).q, at.Rotation(model.misalignment[ind]).q)
def test_pixels_to_gnomic(self):
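        # Distort, temperature-scale, and project gnomic points by hand, then confirm
        # pixels_to_gnomic recovers the original gnomic locations for each distortion model.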
intrins_param = {"kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8, "px": 4005.23, 'py': 2000.33,
'a1': 1e-6, 'a2': 1e-7, 'a3': 1e-8}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3}]
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for gnoms in pinhole:
with self.subTest(**dist, temp=temp, gnoms=gnoms):
mm_dist = model.apply_distortion(np.array(gnoms))
mm_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
mm_undist = model.pixels_to_gnomic(pix_dist, temperature=temp)
np.testing.assert_allclose(mm_undist, gnoms, atol=1e-13)
def test_undistort_pixels(self):
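        # Confirm undistort_pixels maps hand-distorted pixel locations back to the pinhole
        # (undistorted) pixel locations for each distortion model and temperature.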
intrins_param = {"kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8, "px": 4005.23, 'py': 2000.33,
"a1": 1e-3, "a2": 1e-4, "a3": 1e-5}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3}]
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for gnoms in pinhole:
with self.subTest(**dist, temp=temp, gnoms=gnoms):
gnoms = np.array(gnoms).astype(np.float64)
mm_dist = model.apply_distortion(np.array(gnoms))
mm_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
pix_undist = model.undistort_pixels(pix_dist, temperature=temp)
gnoms *= model.get_temperature_scale(temp)
pix_pinhole = ((model.intrinsic_matrix[:, :2] @ gnoms).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_allclose(pix_undist, pix_pinhole, atol=1e-13)
def test_pixels_to_unit(self):
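        # Round trip: project camera-frame vectors to pixels, convert back with pixels_to_unit,
        # and compare against the normalized input vectors.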
intrins_param = {"focal_length": 32.7, "kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8,
"px": 4005.23, 'py': 2000.33, "a1": 1e-6, "a2": 1e-7, "a3": -3e-5}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3},
{"misalignment": np.array([1e-11, 2e-12, -1e-10])},
{"misalignment": np.array([[1e-11, 2e-12, -1e-10], [-1e-13, 1e-11, 2e-12]]),
"estimation_parameters": "multiple misalignments"}]
camera_vecs = [[0, 0, 1], [0.01, 0, 1], [-0.01, 0, 1], [0, 0.01, 1], [0, -0.01, 1], [0.01, 0.01, 1],
[-0.01, -0.01, 1], [[0.01, -0.01], [-0.01, 0.01], [1, 1]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for vec in camera_vecs:
with self.subTest(**dist, temp=temp, vec=vec):
pixel_loc = model.project_onto_image(vec, image=-1, temperature=temp)
unit_vec = model.pixels_to_unit(pixel_loc, image=-1, temperature=temp)
unit_true = np.array(vec).astype(np.float64)
unit_true /= np.linalg.norm(unit_true, axis=0, keepdims=True)
np.testing.assert_allclose(unit_vec, unit_true, atol=1e-13)
def test_overwrite(self):
model1 = self.Class(field_of_view=10, intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]),
distortion_coefficients=np.array([1, 2, 3, 4, 5, 6]), focal_length=60,
misalignment=[[1, 2, 3], [4, 5, 6]], use_a_priori=False,
estimation_parameters=['multiple misalignments'], a1=0, a2=3, a3=5)
model2 = self.Class(field_of_view=20, intrinsic_matrix=np.array([[11, 12, 13], [14, 15, 16]]),
distortion_coefficients=np.array([11, 12, 13, 14, 15, 16]), focal_length=160,
misalignment=[[11, 12, 13], [14, 15, 16]], use_a_priori=True,
estimation_parameters=['single misalignment'], a1=-100, a2=-200, a3=-300)
modeltest = model1.copy()
modeltest.overwrite(model2)
self.assertEqual(modeltest, model2)
modeltest = model2.copy()
modeltest.overwrite(model1)
self.assertEqual(modeltest, model1)
def test_intrinsic_matrix_inv(self):
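        # The 2x3 intrinsic matrix and its inverse should compose to the identity (up to the
        # dropped homogeneous row) in both orders.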
model = self.Class(kx=5, ky=10, kxy=20, kyx=-30.4, px=100, py=-5)
np.testing.assert_array_almost_equal(
model.intrinsic_matrix @ np.vstack([model.intrinsic_matrix_inv, [0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
np.testing.assert_array_almost_equal(
model.intrinsic_matrix_inv @ np.vstack([model.intrinsic_matrix, [0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
def test_distort_pixels(self):
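        # distort_pixels should invert undistort_pixels for a variety of pixel locations
        # and temperatures.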
model = self.Class(kx=1000, ky=-950.5, px=4500, py=139.32, a1=1e-3, a2=1e-4, a3=1e-5,
kxy=0.5, kyx=-8, radial2=1e-5, radial4=1e-5, pinwheel2=1e-7, pinwheel1=-1e-12,
tangential_x=1e-6, tangential_y=2e-12)
pixels = [[0, 1], [1, 0], [-1, 0], [0, -1], [9000., 200.2],
[[4500, 100, 10.98], [0, 139.23, 200.3]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for pix in pixels:
for temp in temperatures:
with self.subTest(pix=pix, temp=temp):
undist_pix = model.undistort_pixels(pix, temperature=temp)
dist_pix = model.distort_pixels(undist_pix, temperature=temp)
np.testing.assert_allclose(dist_pix, pix, atol=1e-10)
def test_distortion_map(self):
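        # The distortion map should be sampled on the requested row/column grid and equal
        # distort_pixels minus the pinhole pixel locations.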
model = self.Class(kx=100, ky=-985.234, px=1000, py=1095, kxy=10, kyx=-5,
e1=1e-6, e2=1e-12, e3=-4e-10, e5=6e-7, e6=-1e-5, e4=1e-7,
a1=1e-6, a2=-1e-7, a3=4e-12)
rows, cols, dist = model.distortion_map((2000, 250), step=10)
rl, cl = np.arange(0, 2000, 10), np.arange(0, 250, 10)
rs, cs = np.meshgrid(rl, cl, indexing='ij')
np.testing.assert_array_equal(rows, rs)
np.testing.assert_array_equal(cols, cs)
distl = model.distort_pixels(np.vstack([cs.flatten(), rs.flatten()]))
np.testing.assert_array_equal(distl - np.vstack([cs.flatten(), rs.flatten()]), dist)
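# Tests for the BrownModel camera model; the shared checks are inherited from TestPinholeModel.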
class TestBrownModel(TestPinholeModel):
def setUp(self):
self.Class = BrownModel
    # Not supported for this model, so disable the test inherited from TestPinholeModel
    test__compute_dgnomic_dfocal_length = None
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
distortion_coefficients=np.array([1, 2, 3, 4, 5]),
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 6))
model = self.Class(kx=1, fy=2, px=4, py=5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)], kxy=80,
estimation_parameters=['kx', 'px'], n_rows=500, n_cols=600,
radial2=1, radial4=2, k3=3, p1=4, tiptilt_x=5)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 80, 4], [0, 2, 5]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['kx', 'px'])
self.assertEqual(model.n_rows, 500)
self.assertEqual(model.n_cols, 600)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 6))
def test_fx(self):
model = self.Class(intrinsic_matrix=np.array([[1, 0, 0], [0, 0, 0]]))
self.assertEqual(model.kx, 1)
model.kx = 100
self.assertEqual(model.kx, 100)
self.assertEqual(model.intrinsic_matrix[0, 0], 100)
def test_fy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 1, 0]]))
self.assertEqual(model.ky, 1)
model.ky = 100
self.assertEqual(model.ky, 100)
self.assertEqual(model.intrinsic_matrix[1, 1], 100)
def test_kxy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.kxy, 1)
model.kxy = 100
self.assertEqual(model.kxy, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_alpha(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.alpha, 1)
model.alpha = 100
self.assertEqual(model.alpha, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_k1(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.k1, 1)
model.k1 = 100
self.assertEqual(model.k1, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_k2(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.k2, 1)
model.k2 = 100
self.assertEqual(model.k2, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_k3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0]))
self.assertEqual(model.k3, 1)
model.k3 = 100
self.assertEqual(model.k3, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_p1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0]))
self.assertEqual(model.p1, 1)
model.p1 = 100
self.assertEqual(model.p1, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_p2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1]))
self.assertEqual(model.p2, 1)
model.p2 = 100
self.assertEqual(model.p2, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_radial2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.radial2, 1)
model.radial2 = 100
self.assertEqual(model.radial2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_radial4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.radial4, 1)
model.radial4 = 100
self.assertEqual(model.radial4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_radial6(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0]))
self.assertEqual(model.radial6, 1)
model.radial6 = 100
self.assertEqual(model.radial6, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_tiptilt_y(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0]))
self.assertEqual(model.tiptilt_y, 1)
model.tiptilt_y = 100
self.assertEqual(model.tiptilt_y, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_tiptilt_x(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1]))
self.assertEqual(model.tiptilt_x, 1)
model.tiptilt_x = 100
self.assertEqual(model.tiptilt_x, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_apply_distortion(self):
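        # Apply each Brown distortion coefficient (k1-k3, p1, p2) in isolation and compare the
        # distorted gnomic locations against the hand-computed solutions below.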
dist_coefs = [{"k1": 1.5, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
solus = [[[0, 0], [2.5, 0], [-2.5, 0], [1.5 + 1.5 ** 4, 0], [-(1.5 + 1.5 ** 4), 0],
[[(1.5 + 1.5 ** 4)], [0]], [[(1.5 + 1.5 ** 4), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 4)], [0, -(1.5 + 1.5 ** 4)], [[0], [(1.5 + 1.5 ** 4)]],
[[0, 0], [(1.5 + 1.5 ** 4), -2.5]], [1 + 2 * 1.5, 1 + 2 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 6), 0], [-(1.5 + 1.5 ** 6), 0],
[[(1.5 + 1.5 ** 6)], [0]], [[(1.5 + 1.5 ** 6), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 6)], [0, -(1.5 + 1.5 ** 6)], [[0], [(1.5 + 1.5 ** 6)]],
[[0, 0], [(1.5 + 1.5 ** 6), -2.5]], [1 + 4 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 8), 0], [-(1.5 + 1.5 ** 8), 0],
[[(1.5 + 1.5 ** 8)], [0]], [[(1.5 + 1.5 ** 8), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 8)], [0, -(1.5 + 1.5 ** 8)], [[0], [(1.5 + 1.5 ** 8)]],
[[0, 0], [(1.5 + 1.5 ** 8), -2.5]], [1 + 8 * 1.5, 1 + 8 * 1.5]],
[[0, 0], [1, 1.5], [-1, 1.5], [1.5, 1.5 ** 3], [-1.5, 1.5 ** 3], [[1.5], [1.5 ** 3]],
[[1.5, -1], [1.5 ** 3, 1.5]],
[0, 1 + 3 * 1.5], [0, -1 + 3 * 1.5], [0, 1.5 + 3 * 1.5 ** 3], [0, -1.5 + 3 * 1.5 ** 3],
[[0], [1.5 + 3 * 1.5 ** 3]],
[[0, 0], [1.5 + 3 * 1.5 ** 3, -1 + 3 * 1.5]], [1 + 2 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [1 + 3 * 1.5, 0], [-1 + 3 * 1.5, 0], [1.5 + 3 * 1.5 ** 3, 0], [-1.5 + 3 * 1.5 ** 3, 0],
[[1.5 + 3 * 1.5 ** 3], [0]], [[1.5 + 3 * 1.5 ** 3, -1 + 3 * 1.5], [0, 0]],
[1.5, 1], [1.5, -1], [1.5 ** 3, 1.5], [1.5 ** 3, -1.5], [[1.5 ** 3], [1.5]],
[[1.5 ** 3, 1.5], [1.5, -1]],
[1 + 4 * 1.5, 1 + 2 * 1.5]]]
for dist, sols in zip(dist_coefs, solus):
with self.subTest(**dist):
model = self.Class(**dist)
for inp, solu in zip(inputs, sols):
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, solu)
def test_get_projections(self):
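        # Verify the pinhole, distorted, and pixel stages returned by get_projections against
        # hand-computed values, including temperature scaling and several misalignment cases.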
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, a1=1e-1, a2=-1e-6, a3=3e-7)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
pin, dist, pix = model.get_projections(point, temperature=temp)
pin_true = np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
dist_true *= model.get_temperature_scale(temp)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pin, pin_true)
np.testing.assert_array_equal(dist, dist_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = -np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point[:2]) / point[2]
pin_true[0] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point[:2]) / point[2]
pin_true[1] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point, image=0)
pin_true = np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
pin, dist, pix = model.get_projections(point, image=1)
pin_true = -np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, a1=1, a2=2, a3=-3)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(temp=temp, misalignment=None):
for point in points:
_, __, pix = model.get_projections(point, temperature=temp)
pix_proj = model.project_onto_image(point, temperature=temp)
np.testing.assert_array_equal(pix, pix_proj)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
_, __, pix = model.get_projections(point, image=0)
pix_proj = model.project_onto_image(point, image=0)
np.testing.assert_array_equal(pix, pix_proj)
_, __, pix = model.get_projections(point, image=1)
pix_proj = model.project_onto_image(point, image=1)
np.testing.assert_array_equal(pix, pix_proj)
def test_compute_pixel_jacobian(self):
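        # Compare compute_pixel_jacobian (pixel location with respect to the camera-frame
        # vector) against central differences of project_onto_image for both misalignment
        # images and several temperatures.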
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(2):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_pixel_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dgnomic(self):
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
            return np.array([(cam_loc_pert_x_f - cam_loc_pert_x_b) / (2 * delta),
                             (cam_loc_pert_y_f - cam_loc_pert_y_b) / (2 * delta)]).swapaxes(0, -1)
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for gnom in input.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test__compute_dgnomic_ddist_gnomic(self):
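        # Check the analytic derivative of the undistorted gnomic location with respect to the
        # distorted gnomic location against central differences of a fixed-point undistortion.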
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
                # perform the fixed-point iteration to invert the distortion
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# subtract off the residual distortion from the gnomic guess
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for dist_gnom in input.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model, delta=1e-8)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-1, atol=1e-10)
def test_compute_unit_vector_jacobian(self):
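        # Compare compute_unit_vector_jacobian (unit vector with respect to the pixel location)
        # against central differences of pixels_to_unit across images and temperatures.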
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T/10,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T/10]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=100, py=100.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.05, k2=-0.03, k3=0.015, p1=1e-7, p2=1e-6,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(2):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_unit_vector_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_ddistorted_gnomic_dgnomic(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
dist_pert_x_f = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) + [0, delta]
dist_pert_y_f = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) - [delta, 0]
dist_pert_x_b = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) - [0, delta]
dist_pert_y_b = cmodel.apply_distortion(loc_pert)
return np.array(
[(dist_pert_x_f - dist_pert_x_b) / (2 * delta), (dist_pert_y_f - dist_pert_y_b) / (2 * delta)]).T
dist_coefs = [{"k1": 1.5, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5},
{"k1": -1.5, "k2": -1.5, "k3": -1.5, "p1": -1.5, "p2": -1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r4 = r ** 4
r6 = r ** 6
num = num_deriv(inp, model)
ana = model._compute_ddistorted_gnomic_dgnomic(np.array(inp), r2, r4, r6)
np.testing.assert_allclose(num, ana, atol=1e-14)
def test__compute_dpixel_ddistorted_gnomic(self):
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"fx": 1.5, "fy": 0, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 1.5, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 1.5, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 1.5, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 0, "py": 1.5},
{"fx": -1.5, "fy": -1.5, "alpha": -1.5, "px": 1.5, "py": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temps = [0, 1, -1, 10.5, -10.5]
for temp in temps:
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef, temp=temp):
for inp in inputs:
num = num_deriv(np.array(inp), model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-14)
def test__compute_dpixel_dintrinsic(self):
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_kxy_f - pix_pert_kxy_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"fx": 1.5, "fy": 0, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 1.5, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 1.5, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 1.5, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 0, "py": 1.5},
{"fx": -1.5, "fy": -1.5, "alpha": -1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for inp in inputs:
with self.subTest(**intrins_coef, inp=inp):
num = num_deriv(inp, model, delta=1e-5)
ana = model._compute_dpixel_dintrinsic(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-14, rtol=1e-5)
def test__compute_ddistorted_gnomic_ddistortion(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.k1 += delta
loc_pert_k1_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k2 += delta
loc_pert_k2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k3 += delta
loc_pert_k3_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p1 += delta
loc_pert_p1_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p2 += delta
loc_pert_p2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k1 -= delta
loc_pert_k1_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k2 -= delta
loc_pert_k2_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k3 -= delta
loc_pert_k3_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p1 -= delta
loc_pert_p1_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p2 -= delta
loc_pert_p2_b = model_pert.apply_distortion(loc)
return np.array([(loc_pert_k1_f - loc_pert_k1_b) / (2 * delta),
(loc_pert_k2_f - loc_pert_k2_b) / (2 * delta),
(loc_pert_k3_f - loc_pert_k3_b) / (2 * delta),
(loc_pert_p1_f - loc_pert_p1_b) / (2 * delta),
(loc_pert_p2_f - loc_pert_p2_b) / (2 * delta)]).T
dist_coefs = [{"k1": 1.5, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r4 = r ** 4
r6 = r ** 6
num = num_deriv(np.array(inp), model)
ana = model._compute_ddistorted_gnomic_ddistortion(np.array(inp), r2, r4, r6)
np.testing.assert_allclose(num, ana, atol=1e-14)
def test__compute_dgnomic_dcamera_point(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0, 0]
gnom_pert_x_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, delta, 0]
gnom_pert_y_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, 0, delta]
gnom_pert_z_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [delta, 0, 0]
gnom_pert_x_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, delta, 0]
gnom_pert_y_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, 0, delta]
gnom_pert_z_b = cmodel.get_projections(loc_pert)[0]
return np.array([(gnom_pert_x_f - gnom_pert_x_b) / (2 * delta),
(gnom_pert_y_f - gnom_pert_y_b) / (2 * delta),
(gnom_pert_z_f - gnom_pert_z_b) / (2 * delta)]).T
inputs = [[0, 0, 1], [0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [0.5, 1e-14, 1]]
model = self.Class()
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dgnomic_dcamera_point(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-9, rtol=1e-5)
def test_get_jacobian_row(self):
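        # Compare a single analytic Jacobian row against central differences over the Brown
        # model parameters, including the temperature coefficients and per-image misalignment.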
def num_deriv(loc, temp, cmodel, delta=1e-8, image=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.fx += delta
pix_pert_fx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy += delta
pix_pert_fy_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha += delta
pix_pert_skew_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fx -= delta
pix_pert_fx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy -= delta
pix_pert_fy_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha -= delta
pix_pert_skew_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 += delta
pix_pert_k1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 += delta
pix_pert_k2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 += delta
pix_pert_k3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 -= delta
pix_pert_k1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 -= delta
pix_pert_k2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 -= delta
pix_pert_k3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
return np.vstack([(pix_pert_fx_f - pix_pert_fx_b) / (delta * 2),
(pix_pert_fy_f - pix_pert_fy_b) / (delta * 2),
(pix_pert_skew_f - pix_pert_skew_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_k1_f - pix_pert_k1_b) / (delta * 2),
(pix_pert_k2_f - pix_pert_k2_b) / (delta * 2),
(pix_pert_k3_f - pix_pert_k3_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta * 2)]).T
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[[1e-9, -1e-9, 2e-9],
[-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
inputs = [[0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [[1], [2], [1200.23]]]
temps = [0, 1.5, -10]
# TODO: investigate if this is actually correct
for temperature in temps:
for inp in inputs:
with self.subTest(temperature=temperature, inp=inp):
num = num_deriv(inp, temperature, model, delta=1e-2)
ana = model._get_jacobian_row(np.array(inp), 0, 1, temperature=temperature)
np.testing.assert_allclose(ana, num, rtol=1e-1, atol=1e-10)
num = num_deriv(inp, temperature, model, delta=1e-2, image=1)
ana = model._get_jacobian_row(np.array(inp), 1, 2, temperature=temperature)
np.testing.assert_allclose(ana, num, atol=1e-10, rtol=1e-1)
def test_compute_jacobian(self):
def num_deriv(loc, temp, cmodel, delta=1e-8, image=0, nimages=2) -> np.ndarray:
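# same central difference construction as in test_get_jacobian_row, with zero blocks padded in so the
# misalignment columns line up with the correct image out of nimages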
model_pert = cmodel.copy()
model_pert.fx += delta
pix_pert_fx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy += delta
pix_pert_fy_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha += delta
pix_pert_skew_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fx -= delta
pix_pert_fx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy -= delta
pix_pert_fy_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha -= delta
pix_pert_skew_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 += delta
pix_pert_k1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 += delta
pix_pert_k2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 += delta
pix_pert_k3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 -= delta
pix_pert_k1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 -= delta
pix_pert_k2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 -= delta
pix_pert_k3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
return np.vstack([(pix_pert_fx_f - pix_pert_fx_b) / (delta * 2),
(pix_pert_fy_f - pix_pert_fy_b) / (delta * 2),
(pix_pert_skew_f - pix_pert_skew_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_k1_f - pix_pert_k1_b) / (delta * 2),
(pix_pert_k2_f - pix_pert_k2_b) / (delta * 2),
(pix_pert_k3_f - pix_pert_k3_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta * 2),
np.zeros(((nimages - image - 1) * 3, 2))]).T
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[[1e-9, -1e-9, 2e-9],
[-1e-9, 2e-9, -1e-9],
[1e-10, 2e-11, 3e-12]],
a1=0.15e-6, a2=-0.01e-7, a3=0.5e-8,
estimation_parameters=['intrinsic', 'temperature dependence', 'multiple misalignments'])
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
model.use_a_priori = False
temps = [0, -20, 20.5]
jac_ana = model.compute_jacobian(inputs, temperature=temps)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
temperature = temps[ind]
for vec in inp.T:
jac_num.append(num_deriv(vec.T, temperature, model, delta=1e-3, image=ind, nimages=numim))
np.testing.assert_allclose(jac_ana, np.vstack(jac_num), rtol=1e-1, atol=1e-9)
model.use_a_priori = True
jac_ana = model.compute_jacobian(inputs, temperature=temps)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
temperature = temps[ind]
for vec in inp.T:
jac_num.append(num_deriv(vec.T, temperature, model, delta=1e-3, image=ind, nimages=numim))
jac_num = np.vstack(jac_num)
jac_num = np.pad(jac_num, [(0, jac_num.shape[1]), (0, 0)], 'constant', constant_values=0)
jac_num[-jac_num.shape[1]:] = np.eye(jac_num.shape[1])
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-1, atol=1e-9)
def test_apply_update(self):
model_param = {"fx": 0, "fy": 0, "alpha": 0, "k1": 0,
"k2": 0, "k3": 0, "p1": 0, "p2": 0, 'a1': 0, 'a2': 0, 'a3': 0,
"px": 0, "py": 0,
"misalignment": [[0, 0, 0], [0, 0, 0]]}
model = self.Class(**model_param,
estimation_parameters=['intrinsic', 'temperature dependence', 'multiple misalignments'])
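# 19 element update vector: 13 intrinsic/distortion/temperature parameters followed by a 3 element
# rotation vector update for each of the two misalignments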
update_vec = np.arange(19)
model.apply_update(update_vec)
keys = list(model_param.keys())
keys.remove('misalignment')
for key in keys:
self.assertEqual(getattr(model, key), update_vec[model.element_dict[key][0]])
for ind, vec in enumerate(update_vec[13:].reshape(-1, 3)):
np.testing.assert_array_almost_equal(at.Rotation(vec).q, at.Rotation(model.misalignment[ind]).q)
def test_pixels_to_gnomic(self):
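# round trip check: distort a pinhole (gnomic) location, apply the temperature scale, map to pixels,
# then verify pixels_to_gnomic recovers the original pinhole location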
intrins_param = {"fx": 3000, "fy": 4000, "alpha": 0.5,
"px": 4005.23, 'py': 2000.33, 'a1': 1e-5, 'a2': 1e-6, 'a3': -1e-7}
dist_coefs = [{"k1": 1.5e-1, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5e-1, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5e-1, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5e-6, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5e-6}]
pinhole = [[0, 0], [0.1, 0], [-0.1, 0], [0.15, 0], [-0.15, 0], [[0.15], [0]], [[0.15, -0.1], [0, 0]],
[0, 0.1], [0, -0.1], [0, 0.15], [0, -0.15], [[0], [0.15]], [[0, 0], [0.15, -0.1]], [0.1, 0.1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for fp_pinhole in pinhole:
with self.subTest(**dist, temp=temp, fp_pinhole=fp_pinhole):
fp_dist = model.apply_distortion(np.array(fp_pinhole))
fp_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ fp_dist).T + model.intrinsic_matrix[:, 2]).T
fp_undist = model.pixels_to_gnomic(pix_dist, temperature=temp)
np.testing.assert_allclose(fp_undist, fp_pinhole, atol=1e-13)
def test_undistort_pixels(self):
intrins_param = {"fx": 3000, "fy": 4000, "alpha": 0.5,
"px": 4005.23, 'py': 2000.33, 'a1': 1e-5, 'a2': 1e-6, 'a3': -1e-7}
dist_coefs = [{"k1": 1.5e-1, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5e-1, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5e-1, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5e-6, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5e-6}]
pinhole = [[0, 0], [0.1, 0], [-0.1, 0], [0.15, 0], [-0.15, 0], [[0.15], [0]], [[0.15, -0.1], [0, 0]],
[0, 0.1], [0, -0.1], [0, 0.15], [0, -0.15], [[0], [0.15]], [[0, 0], [0.15, -0.1]], [0.1, 0.1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
with self.subTest(**dist, temp=temp):
for fp_pinhole in pinhole:
fp_pinhole = np.array(fp_pinhole).astype(np.float64)
fp_dist = model.apply_distortion(fp_pinhole)
fp_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ fp_dist).T + model.intrinsic_matrix[:, 2]).T
pix_undist = model.undistort_pixels(pix_dist, temperature=temp)
fp_pinhole *= model.get_temperature_scale(temp)
pix_pinhole = ((model.intrinsic_matrix[:, :2] @ fp_pinhole).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_allclose(pix_undist, pix_pinhole, atol=1e-13)
def test_pixels_to_unit(self):
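# project camera frame vectors onto the image, then verify pixels_to_unit returns the matching unit vectors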
intrins_param = {"fx": 3000, "fy": 4000, "alpha": 0.5,
"px": 4005.23, 'py': 2000.33, 'a1': 1e-6, 'a2': -2e-7, 'a3': 4.5e-8}
dist_coefs = [{"k1": 1.5e-1, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5e-1, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5e-1, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5e-6, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5e-6},
{"misalignment": np.array([1e-11, 2e-12, -1e-10])},
{"misalignment": np.array([[1e-11, 2e-12, -1e-10], [-1e-13, 1e-11, 2e-12]]),
"estimation_parameters": "multiple misalignments"}]
camera_vecs = [[0, 0, 1], [0.1, 0, 1], [-0.1, 0, 1], [0, 0.1, 1], [0, -0.1, 1], [0.1, 0.1, 1],
[-0.1, -0.1, 1], [[0.1, -0.1], [-0.1, 0.1], [1, 1]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
with self.subTest(**dist, temp=temp):
for vec in camera_vecs:
pixel_loc = model.project_onto_image(vec, image=-1, temperature=temp)
unit_vec = model.pixels_to_unit(pixel_loc, image=-1, temperature=temp)
unit_true = np.array(vec).astype(np.float64)
unit_true /= np.linalg.norm(unit_true, axis=0, keepdims=True)
np.testing.assert_allclose(unit_vec, unit_true, atol=1e-13)
def test_overwrite(self):
model1 = self.Class(field_of_view=10, intrinsic_matrix=np.array([[1, 2, 3], [0, 5, 6]]),
distortion_coefficients=np.array([1, 2, 3, 4, 5]),
misalignment=[[1, 2, 3], [4, 5, 6]], use_a_priori=False,
estimation_parameters=['multiple misalignments'])
model2 = self.Class(field_of_view=20, intrinsic_matrix=np.array([[11, 12, 13], [0, 15, 16]]),
distortion_coefficients=np.array([11, 12, 13, 14, 15]),
misalignment=[[11, 12, 13], [14, 15, 16]], use_a_priori=True,
estimation_parameters=['single misalignment'])
modeltest = model1.copy()
modeltest.overwrite(model2)
self.assertEqual(model2.field_of_view, modeltest.field_of_view)
self.assertEqual(model2.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model2.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model2.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model2.distortion_coefficients, modeltest.distortion_coefficients)
np.testing.assert_array_equal(model2.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model2.estimation_parameters, modeltest.estimation_parameters)
modeltest = model2.copy()
modeltest.overwrite(model1)
self.assertEqual(model1.field_of_view, modeltest.field_of_view)
self.assertEqual(model1.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model1.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model1.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model1.distortion_coefficients, modeltest.distortion_coefficients)
np.testing.assert_array_equal(model1.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model1.estimation_parameters, modeltest.estimation_parameters)
def test_distort_pixels(self):
model = self.Class(kx=1000, ky=-950.5, px=4500, py=139.32, a1=1e-3, a2=1e-4, a3=1e-5,
kxy=0.5, radial2=1e-5, radial4=1e-5, radial6=1e-7,
tiptilt_x=1e-6, tiptilt_y=2e-12)
pixels = [[0, 1], [1, 0], [-1, 0], [0, -1], [9000., 200.2],
[[4500, 100, 10.98], [0, 139.23, 200.3]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for pix in pixels:
for temp in temperatures:
with self.subTest(pix=pix, temp=temp):
undist_pix = model.undistort_pixels(pix, temperature=temp)
dist_pix = model.distort_pixels(undist_pix, temperature=temp)
np.testing.assert_allclose(dist_pix, pix, atol=1e-10)
def test_to_from_elem(self):
element = etree.Element(self.Class.__name__)
model = self.Class(field_of_view=5, use_a_priori=True,
misalignment=[1, 2, 3], kx=2, ky=200, px=50, py=300, kxy=12123,
a1=37, a2=1, a3=-1230, k1=5, k2=10, k3=20, p1=-10, p2=35,
estimation_parameters=['kx', 'multiple misalignments'], n_rows=20, n_cols=30)
model_copy = model.copy()
with self.subTest(misalignment=True):
element = model.to_elem(element, misalignment=True)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
self.assertEqual(model, model_new)
with self.subTest(misalignment=False):
element = model.to_elem(element, misalignment=False)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
model.estimation_parameters[-1] = 'single misalignment'
model.estimate_multiple_misalignments = False
model.misalignment = np.zeros(3)
self.assertEqual(model, model_new)
def test_distortion_map(self):
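# the distortion map should report, on a regular grid of pixels, the shift distort_pixels applies at each location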
model = self.Class(kx=100, ky=-985.234, px=1000, py=1095, kxy=10,
k1=1e-6, k2=1e-12, k3=-4e-10, p1=6e-7, p2=-1e-5,
a1=1e-6, a2=-1e-7, a3=4e-12)
rows, cols, dist = model.distortion_map((2000, 250), step=10)
rl, cl = np.arange(0, 2000, 10), np.arange(0, 250, 10)
rs, cs = np.meshgrid(rl, cl, indexing='ij')
np.testing.assert_array_equal(rows, rs)
np.testing.assert_array_equal(cols, cs)
distl = model.distort_pixels(np.vstack([cs.flatten(), rs.flatten()]))
np.testing.assert_array_equal(distl - np.vstack([cs.flatten(), rs.flatten()]), dist)
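# Testing the OpenCV camera model (inherits the pinhole model tests and overrides the distortion specific pieces)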
class TestOpenCVModel(TestPinholeModel):
def setUp(self):
self.Class = OpenCVModel
# Not supported for this model
test__compute_dgnomic_dfocal_length = None
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
distortion_coefficients=np.array([1, 2, 3, 4, 5]),
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 6))
model = self.Class(kx=1, fy=2, px=4, py=5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)], kxy=80,
estimation_parameters=['kx', 'px'], n_rows=500, n_cols=600,
radial2n=1, radial4n=2, k3=3, p1=4, tiptilt_x=5, radial2d=9, k5=100, k6=-90,
s1=400, thinprism_2=-500, s3=600, s4=5)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 80, 4], [0, 2, 5]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['kx', 'px'])
self.assertEqual(model.n_rows, 500)
self.assertEqual(model.n_cols, 600)
np.testing.assert_array_equal(model.distortion_coefficients, [1, 2, 3, 9, 100, -90, 4, 5, 400, -500, 600, 5])
def test_fx(self):
model = self.Class(intrinsic_matrix=np.array([[1, 0, 0], [0, 0, 0]]))
self.assertEqual(model.kx, 1)
model.kx = 100
self.assertEqual(model.kx, 100)
self.assertEqual(model.intrinsic_matrix[0, 0], 100)
def test_fy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 1, 0]]))
self.assertEqual(model.ky, 1)
model.ky = 100
self.assertEqual(model.ky, 100)
self.assertEqual(model.intrinsic_matrix[1, 1], 100)
def test_kxy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.kxy, 1)
model.kxy = 100
self.assertEqual(model.kxy, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_alpha(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.alpha, 1)
model.alpha = 100
self.assertEqual(model.alpha, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_k1(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.k1, 1)
model.k1 = 100
self.assertEqual(model.k1, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_k2(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.k2, 1)
model.k2 = 100
self.assertEqual(model.k2, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_k3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.k3, 1)
model.k3 = 100
self.assertEqual(model.k3, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_k4(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.k4, 1)
model.k4 = 100
self.assertEqual(model.k4, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_k5(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.k5, 1)
model.k5 = 100
self.assertEqual(model.k5, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_k6(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.k6, 1)
model.k6 = 100
self.assertEqual(model.k6, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_p1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 1, 0]))
self.assertEqual(model.p1, 1)
model.p1 = 100
self.assertEqual(model.p1, 100)
self.assertEqual(model.distortion_coefficients[6], 100)
def test_p2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 1]))
self.assertEqual(model.p2, 1)
model.p2 = 100
self.assertEqual(model.p2, 100)
self.assertEqual(model.distortion_coefficients[7], 100)
def test_s1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]))
self.assertEqual(model.s1, 1)
model.s1 = 100
self.assertEqual(model.s1, 100)
self.assertEqual(model.distortion_coefficients[8], 100)
def test_s2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]))
self.assertEqual(model.s2, 1)
model.s2 = 100
self.assertEqual(model.s2, 100)
self.assertEqual(model.distortion_coefficients[9], 100)
def test_s3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]))
self.assertEqual(model.s3, 1)
model.s3 = 100
self.assertEqual(model.s3, 100)
self.assertEqual(model.distortion_coefficients[10], 100)
def test_s4(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]))
self.assertEqual(model.s4, 1)
model.s4 = 100
self.assertEqual(model.s4, 100)
self.assertEqual(model.distortion_coefficients[11], 100)
def test_radial2n(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.radial2n, 1)
model.radial2n = 100
self.assertEqual(model.radial2n, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_radial4n(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.radial4n, 1)
model.radial4n = 100
self.assertEqual(model.radial4n, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_radial6n(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0]))
self.assertEqual(model.radial6n, 1)
model.radial6n = 100
self.assertEqual(model.radial6n, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_radial2d(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.radial2d, 1)
model.radial2d = 100
self.assertEqual(model.radial2d, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_radial4d(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.radial4d, 1)
model.radial4d = 100
self.assertEqual(model.radial4d, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_radial6d(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.radial6d, 1)
model.radial6d = 100
self.assertEqual(model.radial6d, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_tiptilt_y(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 1, 0]))
self.assertEqual(model.tiptilt_y, 1)
model.tiptilt_y = 100
self.assertEqual(model.tiptilt_y, 100)
self.assertEqual(model.distortion_coefficients[6], 100)
def test_tiptilt_x(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 1]))
self.assertEqual(model.tiptilt_x, 1)
model.tiptilt_x = 100
self.assertEqual(model.tiptilt_x, 100)
self.assertEqual(model.distortion_coefficients[7], 100)
def test_thinprism_1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]))
self.assertEqual(model.thinprism_1, 1)
model.thinprism_1 = 100
self.assertEqual(model.thinprism_1, 100)
self.assertEqual(model.distortion_coefficients[8], 100)
def test_thinprism_2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]))
self.assertEqual(model.thinprism_2, 1)
model.thinprism_2 = 100
self.assertEqual(model.thinprism_2, 100)
self.assertEqual(model.distortion_coefficients[9], 100)
def test_thinprism_3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]))
self.assertEqual(model.thinprism_3, 1)
model.thinprism_3 = 100
self.assertEqual(model.thinprism_3, 100)
self.assertEqual(model.distortion_coefficients[10], 100)
def test_thinprism_4(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]))
self.assertEqual(model.thinprism_4, 1)
model.thinprism_4 = 100
self.assertEqual(model.thinprism_4, 100)
self.assertEqual(model.distortion_coefficients[11], 100)
def test_apply_distortion(self):
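# expected solutions below are worked out by hand with a single coefficient of 1.5 at a time, following the
# OpenCV distortion equations (radial numerator k1-k3, radial denominator k4-k6, tangential p1/p2, thin prism s1-s4)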
dist_coefs = [{"k1": 1.5},
{"k2": 1.5},
{"k3": 1.5},
{"p1": 1.5},
{"p2": 1.5},
{"k4": 1.5},
{"k5": 1.5},
{"k6": 1.5},
{"s1": 1.5},
{"s2": 1.5},
{"s3": 1.5},
{"s4": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
solus = [
# k1
[[0, 0], [2.5, 0], [-2.5, 0], [1.5 + 1.5 ** 4, 0], [-(1.5 + 1.5 ** 4), 0],
[[(1.5 + 1.5 ** 4)], [0]], [[(1.5 + 1.5 ** 4), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 4)], [0, -(1.5 + 1.5 ** 4)], [[0], [(1.5 + 1.5 ** 4)]],
[[0, 0], [(1.5 + 1.5 ** 4), -2.5]], [1 + 2 * 1.5, 1 + 2 * 1.5]],
# k2
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 6), 0], [-(1.5 + 1.5 ** 6), 0],
[[(1.5 + 1.5 ** 6)], [0]], [[(1.5 + 1.5 ** 6), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 6)], [0, -(1.5 + 1.5 ** 6)], [[0], [(1.5 + 1.5 ** 6)]],
[[0, 0], [(1.5 + 1.5 ** 6), -2.5]], [1 + 4 * 1.5, 1 + 4 * 1.5]],
# k3
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 8), 0], [-(1.5 + 1.5 ** 8), 0],
[[(1.5 + 1.5 ** 8)], [0]], [[(1.5 + 1.5 ** 8), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 8)], [0, -(1.5 + 1.5 ** 8)], [[0], [(1.5 + 1.5 ** 8)]],
[[0, 0], [(1.5 + 1.5 ** 8), -2.5]], [1 + 8 * 1.5, 1 + 8 * 1.5]],
# p1
[[0, 0], [1, 1.5], [-1, 1.5], [1.5, 1.5 ** 3], [-1.5, 1.5 ** 3], [[1.5], [1.5 ** 3]],
[[1.5, -1], [1.5 ** 3, 1.5]],
[0, 1 + 3 * 1.5], [0, -1 + 3 * 1.5], [0, 1.5 + 3 * 1.5 ** 3], [0, -1.5 + 3 * 1.5 ** 3],
[[0], [1.5 + 3 * 1.5 ** 3]],
[[0, 0], [1.5 + 3 * 1.5 ** 3, -1 + 3 * 1.5]], [1 + 2 * 1.5, 1 + 4 * 1.5]],
# p2
[[0, 0], [1 + 3 * 1.5, 0], [-1 + 3 * 1.5, 0], [1.5 + 3 * 1.5 ** 3, 0], [-1.5 + 3 * 1.5 ** 3, 0],
[[1.5 + 3 * 1.5 ** 3], [0]], [[1.5 + 3 * 1.5 ** 3, -1 + 3 * 1.5], [0, 0]],
[1.5, 1], [1.5, -1], [1.5 ** 3, 1.5], [1.5 ** 3, -1.5], [[1.5 ** 3], [1.5]],
[[1.5 ** 3, 1.5], [1.5, -1]],
[1 + 4 * 1.5, 1 + 2 * 1.5]],
# k4
[[0, 0], [1 / 2.5, 0], [-1 / 2.5, 0], [1.5 / (1 + 1.5 ** 3), 0], [-1.5 / (1 + 1.5 ** 3), 0],
[[1.5 / (1 + 1.5 ** 3)], [0]], [[1.5 / (1 + 1.5 ** 3), -1 / 2.5], [0, 0]],
[0, 1 / 2.5], [0, -1 / 2.5], [0, 1.5 / (1 + 1.5 ** 3)], [0, -1.5 / (1 + 1.5 ** 3)],
[[0], [1.5 / (1 + 1.5 ** 3)]], [[0, 0], [1.5 / (1 + 1.5 ** 3), -1 / 2.5]], [1 / 4, 1 / 4]],
# k5
[[0, 0], [1 / 2.5, 0], [-1 / 2.5, 0], [1.5 / (1 + 1.5 ** 5), 0], [-1.5 / (1 + 1.5 ** 5), 0],
[[1.5 / (1 + 1.5 ** 5)], [0]], [[1.5 / (1 + 1.5 ** 5), -1 / 2.5], [0, 0]],
[0, 1 / 2.5], [0, -1 / 2.5], [0, 1.5 / (1 + 1.5 ** 5)], [0, -1.5 / (1 + 1.5 ** 5)],
[[0], [1.5 / (1 + 1.5 ** 5)]], [[0, 0], [1.5 / (1 + 1.5 ** 5), -1 / 2.5]], [1 / 7, 1 / 7]],
# k6
[[0, 0], [1 / 2.5, 0], [-1 / 2.5, 0], [1.5 / (1 + 1.5 ** 7), 0], [-1.5 / (1 + 1.5 ** 7), 0],
[[1.5 / (1 + 1.5 ** 7)], [0]], [[1.5 / (1 + 1.5 ** 7), -1 / 2.5], [0, 0]],
[0, 1 / 2.5], [0, -1 / 2.5], [0, 1.5 / (1 + 1.5 ** 7)], [0, -1.5 / (1 + 1.5 ** 7)],
[[0], [1.5 / (1 + 1.5 ** 7)]], [[0, 0], [1.5 / (1 + 1.5 ** 7), -1 / 2.5]], [1 / 13, 1 / 13]],
# s1
[[0, 0], [1 + 1.5, 0], [-1 + 1.5, 0], [1.5 + 1.5 ** 3, 0], [-1.5 + 1.5 ** 3, 0],
[[1.5 + 1.5 ** 3], [0]], [[1.5 + 1.5 ** 3, -1 + 1.5], [0, 0]],
[1.5, 1], [1.5, -1], [1.5 ** 3, 1.5], [1.5 ** 3, -1.5],
[[1.5 ** 3], [1.5]], [[1.5 ** 3, 1.5], [1.5, -1]], [1 + 2 * 1.5, 1]],
# s2
[[0, 0], [1 + 1.5, 0], [-1 + 1.5, 0], [1.5 + 1.5 ** 5, 0], [-1.5 + 1.5 ** 5, 0],
[[1.5 + 1.5 ** 5], [0]], [[1.5 + 1.5 ** 5, -1 + 1.5], [0, 0]],
[1.5, 1], [1.5, -1], [1.5 ** 5, 1.5], [1.5 ** 5, -1.5],
[[1.5 ** 5], [1.5]], [[1.5 ** 5, 1.5], [1.5, -1]], [1 + 4 * 1.5, 1]],
# s3
[[0, 0], [1, 1.5], [-1, 1.5], [1.5, 1.5 ** 3], [-1.5, 1.5 ** 3],
[[1.5], [1.5 ** 3]], [[1.5, -1], [1.5 ** 3, 1.5]],
[0, 1 + 1.5], [0, -1 + 1.5], [0, 1.5 + 1.5 ** 3], [0, -1.5 + 1.5 ** 3],
[[0], [1.5 + 1.5 ** 3]], [[0, 0], [1.5 + 1.5 ** 3, -1 + 1.5]], [1, 1 + 2 * 1.5]],
# s4
[[0, 0], [1, 1.5], [-1, 1.5], [1.5, 1.5 ** 5], [-1.5, 1.5 ** 5],
[[1.5], [1.5 ** 5]], [[1.5, -1], [1.5 ** 5, 1.5]],
[0, 1 + 1.5], [0, -1 + 1.5], [0, 1.5 + 1.5 ** 5], [0, -1.5 + 1.5 ** 5],
[[0], [1.5 + 1.5 ** 5]], [[0, 0], [1.5 + 1.5 ** 5, -1 + 1.5]], [1, 1 + 4 * 1.5]]
]
for dist, sols in zip(dist_coefs, solus):
model = self.Class(**dist)
for inp, solu in zip(inputs, sols):
with self.subTest(**dist, inp=inp):
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, solu)
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, a1=1e-1, a2=-1e-6, a3=3e-7,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
pin, dist, pix = model.get_projections(point, temperature=temp)
pin_true = np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
dist_true *= model.get_temperature_scale(temp)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pin, pin_true)
np.testing.assert_array_equal(dist, dist_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1,
misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = -np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1,
misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point[:2]) / point[2]
pin_true[0] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1,
misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point[:2]) / point[2]
pin_true[1] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1,
misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point, image=0)
pin_true = np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
pin, dist, pix = model.get_projections(point, image=1)
pin_true = -np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, a1=1, a2=2, a3=-3,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1
)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(temp=temp, misalignment=None):
for point in points:
_, __, pix = model.get_projections(point, temperature=temp)
pix_proj = model.project_onto_image(point, temperature=temp)
np.testing.assert_array_equal(pix, pix_proj)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=1, k5=-5, k6=11, s1=1e-6, s2=1e2, s3=-3e-3, s4=5e-1,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
_, __, pix = model.get_projections(point, image=0)
pix_proj = model.project_onto_image(point, image=0)
np.testing.assert_array_equal(pix, pix_proj)
_, __, pix = model.get_projections(point, image=1)
pix_proj = model.project_onto_image(point, image=1)
np.testing.assert_array_equal(pix, pix_proj)
def test_compute_pixel_jacobian(self):
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
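# approximate d(pixel)/d(camera direction vector) by central differences on each component of the input vectors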
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=-0.5, k5=0.02, k6=0.01, s1=-0.45, s2=0.0045, s3=-0.8, s4=0.9,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(2):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_pixel_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dgnomic(self):
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
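# append the focal length as the z component of each gnomic location and normalize to get a unit vector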
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
return np.array([(cam_loc_pert_x_f - cam_loc_pert_x_b)/(2*delta),
(cam_loc_pert_y_f - cam_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=-0.5, k5=0.02, k6=0.01, s1=-0.45, s2=0.0045, s3=-0.8, s4=0.9,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for gnom in input.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test__compute_dgnomic_ddist_gnomic(self):
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
# perform the fixed point iteration to invert the distortion
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# subtract off the residual distortion from the gnomic guess
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T/100,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T/100]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.005, k2=-0.003, k3=0.0015, p1=1e-7, p2=1e-6,
k4=-0.005, k5=0.0002, k6=0.0001, s1=-0.0045, s2=0.000045, s3=-0.008, s4=0.009,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for dist_gnom in input.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model, delta=1e-4)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-1, atol=1e-10)
def test_compute_unit_vector_jacobian(self):
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
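# approximate d(unit vector)/d(pixel) by central differences on the pixel coordinates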
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T/10,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T/10]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=100, py=100.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.005, k2=-0.003, k3=0.0015, p1=1e-7, p2=1e-6,
k4=-0.005, k5=0.0002, k6=0.0001, s1=-0.0045, s2=0.000045, s3=-0.008, s4=0.009,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(2):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_unit_vector_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_ddistorted_gnomic_dgnomic(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
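# approximate d(distorted gnomic)/d(gnomic) by central differences on the undistorted gnomic location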
loc_pert = np.array(loc) + [delta, 0]
dist_pert_x_f = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) + [0, delta]
dist_pert_y_f = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) - [delta, 0]
dist_pert_x_b = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) - [0, delta]
dist_pert_y_b = cmodel.apply_distortion(loc_pert)
return np.array(
[(dist_pert_x_f - dist_pert_x_b) / (2 * delta), (dist_pert_y_f - dist_pert_y_b) / (2 * delta)]).T
dist_coefs = [{"k1": 1.5},
{"k2": 1.5},
{"k3": 1.5},
{"p1": 1.5},
{"p2": 1.5},
{"k4": 1.5},
{"k5": 1.5},
{"k6": 1.5},
{"s1": 1.5},
{"s2": 1.5},
{"s3": 1.5},
{"s4": 1.5},
{"k1": -1.5, "k2": -1.5, "k3": -1.5, "p1": -1.5, "p2": -1.5,
"k4": -1.5, "k5": -1.5, "k6": -1.5, "s1": -1.5, "s2": -1.5, "s3": -1.5, "s4": -1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
for inp in inputs:
with self.subTest(**dist_coef, inp=inp):
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r4 = r ** 4
r6 = r ** 6
num = num_deriv(inp, model)
ana = model._compute_ddistorted_gnomic_dgnomic(np.array(inp), r2, r4, r6)
np.testing.assert_allclose(num, ana, atol=1e-14)
def test__compute_dpixel_ddistorted_gnomic(self):
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
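# approximate d(pixel)/d(distorted gnomic): apply the temperature scale and the affine intrinsic matrix
# to the perturbed locations and take central differences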
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"fx": 1.5, "fy": 0, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 1.5, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 1.5, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 1.5, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 0, "py": 1.5},
{"fx": -1.5, "fy": -1.5, "alpha": -1.5, "px": 1.5, "py": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temps = [0, 1, -1, 10.5, -10.5]
for temp in temps:
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef, temp=temp):
for inp in inputs:
num = num_deriv(np.array(inp), model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-14)
def test__compute_dpixel_dintrinsic(self):
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
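# approximate d(pixel)/d(intrinsic terms) by perturbing kx, ky, kxy, px, and py and taking central differences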
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_kxy_f - pix_pert_kxy_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"fx": 1.5, "fy": 0, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 1.5, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 1.5, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 1.5, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 0, "py": 1.5},
{"fx": -1.5, "fy": -1.5, "alpha": -1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for inp in inputs:
with self.subTest(**intrins_coef, inp=inp):
num = num_deriv(inp, model, delta=1e-5)
ana = model._compute_dpixel_dintrinsic(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-14, rtol=1e-5)
def test__compute_ddistorted_gnomic_ddistortion(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
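# approximate d(distorted gnomic)/d(distortion coefficients) for the full 12 coefficient OpenCV set
# (k1-k6, p1, p2, s1-s4) via central differences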
model_pert = cmodel.copy()
model_pert.k1 += delta
loc_pert_k1_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k2 += delta
loc_pert_k2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k3 += delta
loc_pert_k3_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k4 += delta
loc_pert_k4_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k5 += delta
loc_pert_k5_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k6 += delta
loc_pert_k6_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p1 += delta
loc_pert_p1_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p2 += delta
loc_pert_p2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.s1 += delta
loc_pert_s1_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.s2 += delta
loc_pert_s2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.s3 += delta
loc_pert_s3_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.s4 += delta
loc_pert_s4_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k1 -= delta
loc_pert_k1_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k2 -= delta
loc_pert_k2_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k3 -= delta
loc_pert_k3_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k4 -= delta
loc_pert_k4_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k5 -= delta
loc_pert_k5_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k6 -= delta
loc_pert_k6_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p1 -= delta
loc_pert_p1_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p2 -= delta
loc_pert_p2_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.s1 -= delta
loc_pert_s1_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.s2 -= delta
loc_pert_s2_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.s3 -= delta
loc_pert_s3_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.s4 -= delta
loc_pert_s4_b = model_pert.apply_distortion(loc)
return np.array([(loc_pert_k1_f - loc_pert_k1_b) / (2 * delta),
(loc_pert_k2_f - loc_pert_k2_b) / (2 * delta),
(loc_pert_k3_f - loc_pert_k3_b) / (2 * delta),
(loc_pert_k4_f - loc_pert_k4_b) / (2 * delta),
(loc_pert_k5_f - loc_pert_k5_b) / (2 * delta),
(loc_pert_k6_f - loc_pert_k6_b) / (2 * delta),
(loc_pert_p1_f - loc_pert_p1_b) / (2 * delta),
(loc_pert_p2_f - loc_pert_p2_b) / (2 * delta),
(loc_pert_s1_f - loc_pert_s1_b) / (2 * delta),
(loc_pert_s2_f - loc_pert_s2_b) / (2 * delta),
(loc_pert_s3_f - loc_pert_s3_b) / (2 * delta),
(loc_pert_s4_f - loc_pert_s4_b) / (2 * delta)]).T
dist_coefs = [{"k1": 1.5},
{"k2": 1.5},
{"k3": 1.5},
{"p1": 1.5},
{"p2": 1.5},
{"k4": 1.5},
{"k5": 1.5},
{"k6": 1.5},
{"s1": 1.5},
{"s2": 1.5},
{"s3": 1.5},
{"s4": 1.5},
{"k1": -1.5, "k2": -1.5, "k3": -1.5, "p1": -1.5, "p2": -1.5,
"k4": -1.5, "k5": -1.5, "k6": -1.5, "s1": -1.5, "s2": -1.5, "s3": -1.5, "s4": -1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r4 = r ** 4
r6 = r ** 6
num = num_deriv(np.array(inp), model)
ana = model._compute_ddistorted_gnomic_ddistortion(np.array(inp), r2, r4, r6)
np.testing.assert_allclose(num, ana, rtol=1e-5, atol=1e-14)
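# Note: the hand-unrolled perturbations in num_deriv above could equivalently be
# generated in a loop over coefficient names (hedged sketch, not part of the original
# test; assumes the model exposes each coefficient as a same-named attribute, as used above):
#     cols = []
#     for name in ('k1', 'k2', 'k3', 'k4', 'k5', 'k6', 'p1', 'p2', 's1', 's2', 's3', 's4'):
#         fwd, bwd = cmodel.copy(), cmodel.copy()
#         setattr(fwd, name, getattr(fwd, name) + delta)
#         setattr(bwd, name, getattr(bwd, name) - delta)
#         cols.append((fwd.apply_distortion(loc) - bwd.apply_distortion(loc)) / (2 * delta))
#     return np.array(cols).T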
def test__compute_dgnomic_dcamera_point(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0, 0]
gnom_pert_x_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, delta, 0]
gnom_pert_y_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, 0, delta]
gnom_pert_z_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [delta, 0, 0]
gnom_pert_x_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, delta, 0]
gnom_pert_y_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, 0, delta]
gnom_pert_z_b = cmodel.get_projections(loc_pert)[0]
return np.array([(gnom_pert_x_f - gnom_pert_x_b) / (2 * delta),
(gnom_pert_y_f - gnom_pert_y_b) / (2 * delta),
(gnom_pert_z_f - gnom_pert_z_b) / (2 * delta)]).T
inputs = [[0, 0, 1], [0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [0.5, 1e-14, 1]]
model = self.Class()
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dgnomic_dcamera_point(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-9, rtol=1e-5)
def test_get_jacobian_row(self):
def num_deriv(loc, temp, cmodel, delta=1e-8, image=0) -> np.ndarray:
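# Numeric Jacobian of a projected pixel with respect to every estimable parameter:
# intrinsics (fx, fy, alpha, px, py), temperature coefficients (a1-a3), distortion
# coefficients (k1-k6, p1, p2, s1-s4) and the per-image misalignment vector, each
# approximated by a central difference with step delta.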
model_pert = cmodel.copy()
model_pert.fx += delta
pix_pert_fx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy += delta
pix_pert_fy_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha += delta
pix_pert_skew_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fx -= delta
pix_pert_fx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy -= delta
pix_pert_fy_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha -= delta
pix_pert_skew_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 += delta
pix_pert_k1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 += delta
pix_pert_k2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 += delta
pix_pert_k3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k4 += delta
pix_pert_k4_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k5 += delta
pix_pert_k5_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k6 += delta
pix_pert_k6_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s1 += delta
pix_pert_s1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s2 += delta
pix_pert_s2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s3 += delta
pix_pert_s3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s4 += delta
pix_pert_s4_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 -= delta
pix_pert_k1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 -= delta
pix_pert_k2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 -= delta
pix_pert_k3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k4 -= delta
pix_pert_k4_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k5 -= delta
pix_pert_k5_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k6 -= delta
pix_pert_k6_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s1 -= delta
pix_pert_s1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s2 -= delta
pix_pert_s2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s3 -= delta
pix_pert_s3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s4 -= delta
pix_pert_s4_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
return np.vstack([(pix_pert_fx_f - pix_pert_fx_b) / (delta * 2),
(pix_pert_fy_f - pix_pert_fy_b) / (delta * 2),
(pix_pert_skew_f - pix_pert_skew_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_k1_f - pix_pert_k1_b) / (delta * 2),
(pix_pert_k2_f - pix_pert_k2_b) / (delta * 2),
(pix_pert_k3_f - pix_pert_k3_b) / (delta * 2),
(pix_pert_k4_f - pix_pert_k4_b) / (delta * 2),
(pix_pert_k5_f - pix_pert_k5_b) / (delta * 2),
(pix_pert_k6_f - pix_pert_k6_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_s1_f - pix_pert_s1_b) / (delta * 2),
(pix_pert_s2_f - pix_pert_s2_b) / (delta * 2),
(pix_pert_s3_f - pix_pert_s3_b) / (delta * 2),
(pix_pert_s4_f - pix_pert_s4_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta * 2)]).T
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=-0.5, k5=0.02, k6=0.01, s1=-0.45, s2=0.0045, s3=-0.8, s4=0.9,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
inputs = [[0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [[1], [2], [1200.23]]]
temps = [0, 1.5, -10]
# TODO: investigate if this is actually correct
for temperature in temps:
for inp in inputs:
with self.subTest(temperature=temperature, inp=inp):
num = num_deriv(inp, temperature, model, delta=1e-2)
ana = model._get_jacobian_row(np.array(inp), 0, 1, temperature=temperature)
np.testing.assert_allclose(ana, num, rtol=1e-1, atol=1e-10)
num = num_deriv(inp, temperature, model, delta=1e-2, image=1)
ana = model._get_jacobian_row(np.array(inp), 1, 2, temperature=temperature)
np.testing.assert_allclose(ana, num, atol=1e-10, rtol=1e-1)
def test_compute_jacobian(self):
def num_deriv(loc, temp, cmodel, delta=1e-8, image=0, nimages=2) -> np.ndarray:
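# Same central-difference Jacobian as in test_get_jacobian_row, but padded with zero
# blocks so the misalignment columns line up with the image index when multiple
# images (nimages) are estimated together.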
model_pert = cmodel.copy()
model_pert.fx += delta
pix_pert_fx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy += delta
pix_pert_fy_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha += delta
pix_pert_skew_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fx -= delta
pix_pert_fx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy -= delta
pix_pert_fy_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha -= delta
pix_pert_skew_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 += delta
pix_pert_k1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 += delta
pix_pert_k2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 += delta
pix_pert_k3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k4 += delta
pix_pert_k4_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k5 += delta
pix_pert_k5_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k6 += delta
pix_pert_k6_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s1 += delta
pix_pert_s1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s2 += delta
pix_pert_s2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s3 += delta
pix_pert_s3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s4 += delta
pix_pert_s4_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 -= delta
pix_pert_k1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 -= delta
pix_pert_k2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 -= delta
pix_pert_k3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k4 -= delta
pix_pert_k4_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k5 -= delta
pix_pert_k5_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k6 -= delta
pix_pert_k6_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s1 -= delta
pix_pert_s1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s2 -= delta
pix_pert_s2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s3 -= delta
pix_pert_s3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.s4 -= delta
pix_pert_s4_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
return np.vstack([(pix_pert_fx_f - pix_pert_fx_b) / (delta * 2),
(pix_pert_fy_f - pix_pert_fy_b) / (delta * 2),
(pix_pert_skew_f - pix_pert_skew_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_k1_f - pix_pert_k1_b) / (delta * 2),
(pix_pert_k2_f - pix_pert_k2_b) / (delta * 2),
(pix_pert_k3_f - pix_pert_k3_b) / (delta * 2),
(pix_pert_k4_f - pix_pert_k4_b) / (delta * 2),
(pix_pert_k5_f - pix_pert_k5_b) / (delta * 2),
(pix_pert_k6_f - pix_pert_k6_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_s1_f - pix_pert_s1_b) / (delta * 2),
(pix_pert_s2_f - pix_pert_s2_b) / (delta * 2),
(pix_pert_s3_f - pix_pert_s3_b) / (delta * 2),
(pix_pert_s4_f - pix_pert_s4_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta * 2),
np.zeros(((nimages - image - 1) * 3, 2))]).T
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
k4=-0.5, k5=0.02, k6=0.01, s1=-0.45, s2=0.0045, s3=-0.8, s4=0.9,
misalignment=[[1e-9, -1e-9, 2e-9],
[-1e-9, 2e-9, -1e-9],
[1e-10, 2e-11, 3e-12]],
a1=0.15e-6, a2=-0.01e-7, a3=0.5e-8,
estimation_parameters=['intrinsic', 'temperature dependence', 'multiple misalignments'])
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
model.use_a_priori = False
temps = [0, -20, 20.5]
jac_ana = model.compute_jacobian(inputs, temperature=temps)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
temperature = temps[ind]
for vec in inp.T:
jac_num.append(num_deriv(vec.T, temperature, model, delta=1e-3, image=ind, nimages=numim))
np.testing.assert_allclose(jac_ana, np.vstack(jac_num), rtol=1e-1, atol=1e-9)
model.use_a_priori = True
jac_ana = model.compute_jacobian(inputs, temperature=temps)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
temperature = temps[ind]
for vec in inp.T:
jac_num.append(num_deriv(vec.T, temperature, model, delta=1e-3, image=ind, nimages=numim))
jac_num = np.vstack(jac_num)
jac_num = np.pad(jac_num, [(0, jac_num.shape[1]), (0, 0)], 'constant', constant_values=0)  # target API: numpy.pad
import numpy as np
import scipy.linalg
from scipy.special import gammaln, digamma
from suffstats import ParamBag, SuffStatBag
from util import LOGTWO, LOGPI, LOGTWOPI, EPS
from util import dotATA, dotATB, dotABT
from util import as1D, as2D, as3D
from util import numpyToSharedMemArray, fillSharedMemArray
from obsmodel.AbstractObsModel import AbstractObsModel
from obsmodel.GaussObsModel import createECovMatFromUserInput
class AutoRegGaussObsModel(AbstractObsModel):
''' First-order auto-regressive data generation model.
Attributes for Prior (Matrix-Normal-Wishart)
--------
nu : float
degrees of freedom
B : 2D array, size D x D
scale matrix that sets mean of parameter Sigma
M : 2D array, size D x E
sets mean of parameter A
V : 2D array, size E x E
scale matrix that sets covariance of parameter A
Attributes for k-th component of EstParams (EM point estimates)
---------
A[k] : 2D array, size D x E
coefficient matrix for auto-regression.
Sigma[k] : 2D array, size D x D
covariance matrix.
Attributes for k-th component of Post (VB parameter)
---------
nu[k] : float
B[k] : 2D array, size D x D
M[k] : 2D array, size D x E
V[k] : 2D array, size E x E
'''
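# Hedged usage sketch (not part of the original module): constructing the model directly
# from dimensions rather than a Data object. Passing ECovMat='eye' is an assumption about
# what createECovMatFromUserInput accepts; adjust to the prior you actually want.
#     model = AutoRegGaussObsModel(inferType='VB', D=2, E=2, ECovMat='eye', sF=1.0)
#     print(model.get_info_string_prior())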
def __init__(self, inferType='EM', D=None, E=None,
min_covar=None,
Data=None,
**PriorArgs):
''' Initialize bare obsmodel with valid prior hyperparameters.
Resulting object lacks either EstParams or Post,
which must be created separately (see init_global_params).
'''
# Set dimension D
if Data is not None:
D = Data.X.shape[1]
else:
assert D is not None
D = int(D)
self.D = D
# Set dimension E
if Data is not None:
E = Data.Xprev.shape[1]
else:
assert E is not None
E = int(E)
self.E = E
self.K = 0
self.inferType = inferType
self.min_covar = min_covar
self.createPrior(Data, D=D, E=E, **PriorArgs)
self.Cache = dict()
def createPrior(
self, Data,
D=None, E=None,
nu=0, B=None,
M=None, V=None,
ECovMat=None, sF=1.0,
VMat='eye', sV=1.0, MMat='zero', sM=1.0,
**kwargs):
''' Initialize Prior ParamBag attribute.
Post Condition
------
Prior expected covariance matrix set to match provided value.
'''
if Data is None:
if D is None:
raise ValueError("Need to specify dimension D")
if E is None:
raise ValueError("Need to specify dimension E")
if Data is not None:
if D is None:
D = Data.X.shape[1]
else:
assert D == Data.X.shape[1]
if E is None:
E = Data.Xprev.shape[1]
else:
assert E == Data.Xprev.shape[1]
nu = np.maximum(nu, D + 2)
if B is None:
if ECovMat is None or isinstance(ECovMat, str):
ECovMat = createECovMatFromUserInput(D, Data, ECovMat, sF)
B = ECovMat * (nu - D - 1)
B = as2D(B)
if M is None:
if MMat == 'zero':
M = np.zeros((D, E))
elif MMat == 'eye':
assert D <= E
M = sM * np.eye(D)
M = np.hstack([M, np.zeros((D, E-D))])
assert M.shape == (D,E)
else:
raise ValueError('Unrecognized MMat: %s' % (MMat))
else:
M = as2D(M)
if V is None:
if VMat == 'eye':
V = sV * np.eye(E)
elif VMat == 'same':
assert D == E
V = sV * ECovMat
else:
raise ValueError('Unrecognized VMat: %s' % (VMat))
else:
V = as2D(V)
self.Prior = ParamBag(K=0, D=D, E=E)
self.Prior.setField('nu', nu, dims=None)
self.Prior.setField('B', B, dims=('D', 'D'))
self.Prior.setField('V', V, dims=('E', 'E'))
self.Prior.setField('M', M, dims=('D', 'E'))
def get_mean_for_comp(self, k=None):
if k is None or k == 'prior':
return np.diag(self.Prior.M)
elif hasattr(self, 'EstParams'):
return np.diag(self.EstParams.A[k])
else:
return np.diag(self.Post.M[k])
def get_covar_mat_for_comp(self, k=None):
if k is None or k == 'prior':
return self._E_CovMat()
elif hasattr(self, 'EstParams'):
return self.EstParams.Sigma[k]
else:
return self._E_CovMat(k)
def get_name(self):
return 'AutoRegGauss'
def get_info_string(self):
return 'Auto-Regressive Gaussian with full covariance.'
def get_info_string_prior(self):
msg = 'MatrixNormal-Wishart on each mean/prec matrix pair: A, Lam\n'
if self.D > 2:
sfx = ' ...'
else:
sfx = ''
M = self.Prior.M[:2, :2]
S = self._E_CovMat()[:2, :2]
msg += 'E[ A ] = \n'
msg += str(M) + sfx + '\n'
msg += 'E[ Sigma ] = \n'
msg += str(S) + sfx
msg = msg.replace('\n', '\n ')
return msg
def setEstParams(self, obsModel=None, SS=None, LP=None, Data=None,
A=None, Sigma=None,
**kwargs):
''' Initialize EstParams attribute with fields A, Sigma.
'''
self.ClearCache()
if obsModel is not None:
self.EstParams = obsModel.EstParams.copy()
self.K = self.EstParams.K
return
if LP is not None and Data is not None:
SS = self.calcSummaryStats(Data, None, LP)
if SS is not None:
self.updateEstParams(SS)
else:
A = as3D(A)
Sigma = as3D(Sigma)
self.EstParams = ParamBag(
K=A.shape[0], D=A.shape[1], E=A.shape[2])
self.EstParams.setField('A', A, dims=('K', 'D', 'E'))
self.EstParams.setField('Sigma', Sigma, dims=('K', 'D', 'D'))
def setEstParamsFromPost(self, Post):
''' Convert from Post to EstParams.
'''
D = Post.D
self.EstParams = ParamBag(K=Post.K, D=D, E=Post.E)
A = Post.M.copy()
Sigma = Post.B / (Post.nu - D - 1)[:, np.newaxis, np.newaxis]
self.EstParams.setField('A', A, dims=('K', 'D', 'E'))
self.EstParams.setField('Sigma', Sigma, dims=('K', 'D', 'D'))
self.K = self.EstParams.K
def setPostFactors(self, obsModel=None, SS=None, LP=None, Data=None,
nu=0, B=0, M=0, V=0,
**kwargs):
''' Set Post attribute to provided values.
'''
self.ClearCache()
if obsModel is not None:
if hasattr(obsModel, 'Post'):
self.Post = obsModel.Post.copy()
else:
self.setPostFromEstParams(obsModel.EstParams)
self.K = self.Post.K
return
if LP is not None and Data is not None:
SS = self.calcSummaryStats(Data, None, LP)
if SS is not None:
self.updatePost(SS)
else:
M = as3D(M)
B = as3D(B)
V = as3D(V)
K, D, E = M.shape
assert D == self.D
assert E == self.E
self.Post = ParamBag(K=K, D=self.D, E=self.E)
self.Post.setField('nu', as1D(nu), dims=('K'))
self.Post.setField('B', B, dims=('K', 'D', 'D'))
self.Post.setField('M', M, dims=('K', 'D', 'E'))
self.Post.setField('V', V, dims=('K', 'E', 'E'))
self.K = self.Post.K
def setPostFromEstParams(self, EstParams, Data=None, N=None):
''' Set Post attribute values based on provided EstParams.
'''
K = EstParams.K
D = EstParams.D
E = EstParams.E
if Data is not None:
N = Data.nObsTotal
N = np.asarray(N, dtype=float)  # the np.float alias is deprecated/removed in newer numpy; use the builtin float
if N.ndim == 0:
N = N / K * np.ones(K)
nu = self.Prior.nu + N
B = EstParams.Sigma * (nu - D - 1)[:, np.newaxis, np.newaxis]
M = EstParams.A.copy()
V = as3D(self.Prior.V)
self.Post = ParamBag(K=K, D=D, E=E)
self.Post.setField('nu', nu, dims=('K'))
self.Post.setField('B', B, dims=('K', 'D', 'D'))
self.Post.setField('M', M, dims=('K', 'D', 'E'))
self.Post.setField('V', V, dims=('K', 'E', 'E'))
self.K = self.Post.K
def calcSummaryStats(self, Data, SS, LP, **kwargs):
""" Fill in relevant sufficient stats fields into provided SS.
Returns
-------
SS : bnpy.suffstats.SuffStatBag
"""
return calcSummaryStats(Data, SS, LP, **kwargs)
def forceSSInBounds(self, SS):
''' Force count vector N to remain positive
This avoids numerical problems due to incremental add/subtract ops
which can cause computations like
x = 10.
x += 1e-15
x -= 10
x -= 1e-15
to be slightly different than zero instead of exactly zero.
Post Condition
--------------
Field N is guaranteed to be positive.
'''
np.maximum(SS.N, 0, out=SS.N)
def incrementSS(self, SS, k, x):
pass
def decrementSS(self, SS, k, x):
pass
def calcSummaryStatsForContigBlock(self, Data, SS=None, a=0, b=0):
''' Calculate sufficient stats for a single contiguous block of data
'''
D = Data.X.shape[1]
E = Data.Xprev.shape[1]
if SS is None:
SS = SuffStatBag(K=1, D=D, E=E)
elif not hasattr(SS, 'E'):
SS._Fields.E = E
ppT = dotATA(Data.Xprev[a:b])[np.newaxis, :, :]
xxT = dotATA(Data.X[a:b])[np.newaxis, :, :]
pxT = dotATB(Data.Xprev[a:b], Data.X[a:b])[np.newaxis, :, :]
SS.setField('N', (b - a) * np.ones(1), dims='K')
SS.setField('xxT', xxT, dims=('K', 'D', 'D'))
SS.setField('ppT', ppT, dims=('K', 'E', 'E'))
SS.setField('pxT', pxT, dims=('K', 'E', 'D'))
return SS
def calcLogSoftEvMatrix_FromEstParams(self, Data, **kwargs):
''' Compute log soft evidence matrix for Dataset under EstParams.
Returns
-------
L : 2D array, size N x K
L[n,k] = log p( data n | EstParams for comp k )
'''
K = self.EstParams.K
L = np.empty((Data.nObs, K))
for k in xrange(K):
L[:, k] = - 0.5 * self.D * LOGTWOPI \
- 0.5 * self._logdetSigma(k) \
- 0.5 * self._mahalDist_EstParam(Data.X, Data.Xprev, k)
return L
def _mahalDist_EstParam(self, X, Xprev, k):
''' Calc Mahalanobis distance from comp k to every row of X.
Args
----
X : 2D array, size N x D
k : integer ID of comp
Returns
-------
dist : 1D array, size N
'''
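# With Sigma_k = L L^T (lower Cholesky factor from GetCached('cholSigma', k)), the
# distance for row n is || L^{-1} (x_n - A_k xprev_n) ||^2, computed below by a
# triangular solve followed by an elementwise square and a column sum.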
deltaX = X - np.dot(Xprev, self.EstParams.A[k].T)
Q = np.linalg.solve(self.GetCached('cholSigma', k),
deltaX.T)
Q *= Q
return np.sum(Q, axis=0)
def _cholSigma(self, k):
''' Calculate lower cholesky decomposition of Sigma[k]
Returns
-------
L : 2D array, size D x D, lower triangular
Sigma = np.dot(L, L.T)
'''
return scipy.linalg.cholesky(self.EstParams.Sigma[k], lower=1)
def _logdetSigma(self, k):
''' Calculate log determinant of EstParam.Sigma for comp k
Returns
-------
logdet : scalar real
'''
return 2 * np.sum(np.log(np.diag(self.GetCached('cholSigma', k))))
def updateEstParams_MaxLik(self, SS):
''' Update attribute EstParams for all comps given suff stats.
Update uses the maximum likelihood objective for point estimation.
Post Condition
---------
Attributes K and EstParams updated in-place.
'''
self.ClearCache()
if not hasattr(self, 'EstParams') or self.EstParams.K != SS.K:
self.EstParams = ParamBag(K=SS.K, D=SS.D, E=SS.E)
minCovMat = self.min_covar * np.eye(SS.D)
if SS.E == SS.D:
minCovMat_EE = minCovMat
else:
minCovMat_EE = self.min_covar * np.eye(SS.E)
A = np.zeros((SS.K, self.D, self.E))
Sigma = np.zeros((SS.K, self.D, self.D))
for k in xrange(SS.K):
# Add small pos multiple of identity to make invertible
# TODO: This is source of potential stability issues.
A[k] = np.linalg.solve(SS.ppT[k] + minCovMat_EE,
SS.pxT[k]).T
Sigma[k] = SS.xxT[k] \
- 2 * np.dot(SS.pxT[k].T, A[k].T) \
+ np.dot(A[k], np.dot(SS.ppT[k], A[k].T))
Sigma[k] /= SS.N[k]
# Sigma[k] = 0.5 * (Sigma[k] + Sigma[k].T) # symmetry!
Sigma[k] += minCovMat
self.EstParams.setField('A', A, dims=('K', 'D', 'E'))
self.EstParams.setField('Sigma', Sigma, dims=('K', 'D', 'D'))
self.K = SS.K
def updateEstParams_MAP(self, SS):
''' Update attribute EstParams for all comps given suff stats.
Update uses the MAP objective for point estimation.
Post Condition
---------
Attributes K and EstParams updated in-place.
'''
self.ClearCache()
if not hasattr(self, 'EstParams') or self.EstParams.K != SS.K:
self.EstParams = ParamBag(K=SS.K, D=SS.D, E=SS.E)
raise NotImplementedError('TODO')
def updatePost(self, SS):
''' Update attribute Post for all comps given suff stats.
Update uses the variational objective.
Post Condition
---------
Attributes K and Post updated in-place.
'''
self.ClearCache()
if not hasattr(self, 'Post') or self.Post.K != SS.K:
self.Post = ParamBag(K=SS.K, D=SS.D, E=SS.E)
elif not hasattr(self.Post, 'E'):
self.Post.E = SS.E
nu, B, M, V = self.calcPostParams(SS)
self.Post.setField('nu', nu, dims=('K'))
self.Post.setField('B', B, dims=('K', 'D', 'D'))
self.Post.setField('M', M, dims=('K', 'D', 'E'))
self.Post.setField('V', V, dims=('K', 'E', 'E'))
self.K = SS.K
def calcPostParams(self, SS):
''' Calc updated posterior params for all comps given suff stats
These params define the common form of the exponential-family
Matrix-Normal-Wishart posterior distribution over A and Sigma.
Returns
--------
nu : 1D array, size K
B : 3D array, size K x D x D
each B[k] symmetric and positive definite
M : 3D array, size K x D x E
V : 3D array, size K x E x E
'''
Prior = self.Prior
nu = Prior.nu + SS.N
B_MVM = Prior.B + np.dot(Prior.M, np.dot(Prior.V, Prior.M.T))
B = SS.xxT + B_MVM[np.newaxis, :]
V = SS.ppT + Prior.V[np.newaxis, :]
M = np.zeros((SS.K, SS.D, SS.E))  # target API: numpy.zeros
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
import numpy
from ..objects._dataset import Dataset
from ..utilities._checkInRange import checkInRange
def plotFeatureRanges(dataset, compounds, logx=False, histBins=20, savePath=None, figureFormat='png', dpi=72, figureSize=(4, 7)):
"""
plotFeatureRanges(dataset, compounds, logx=False, histBins=20, **kwargs)
Plot distribution plots of the values listed in **compounds** onto a set of axes with a linked x-axis.
If reference ranges are specified in :py:attr:`~nPYc.objects.Dataset.featureMetadata`, a reference range will be drawn behind each plot. Where reference ranges are available, distributions that fall within the range are shaded green and those that fall outside are shaded red; where no reference range is available the distribution is shaded blue.
:param Dataset dataset: Dataset object to plot values from
:param list compounds: List of features to plot
:param bool logx: Calculate and plot histograms on a log10 scale; if the minimum value is below 1, the histogram is calculated by adding one to all values
:param int histBins: Number of bins for histograms
"""
if not isinstance(dataset, Dataset):
raise TypeError('dataset must be an instance of Dataset.')
with sns.axes_style("whitegrid", rc={'axes.grid': False}):
width = figureSize[0]
height = 0.6 * len(compounds)
fig, ax = plt.subplots(len(compounds), 1, sharex=True, figsize=(width, height))
globalMinV = numpy.finfo(numpy.float64).max
globalMaxV = 0
globalMax = 0
if not isinstance(ax, numpy.ndarray):
ax = [ax]
for compound in compounds:
if compound in dataset.featureMetadata['Feature Name'].values:
featureIndex = dataset.featureMetadata.loc[dataset.featureMetadata['Feature Name'] == compound].index[0]
values = dataset.intensityData[:,featureIndex]
if numpy.min(values) < globalMinV:
globalMinV = numpy.nanmin(values)
if numpy.max(values) > globalMaxV:
globalMaxV = numpy.nanmax(values)
if globalMinV == globalMaxV:
logx = False
globalMaxV = globalMaxV + 1
if logx == True:
if globalMinV < 1:
offset = 1
globalMinV = globalMinV + offset
globalMaxV = globalMaxV + offset
else:
offset = 0
if globalMinV < 0:
logx = False
nbins = histBins
xscale = 'linear'
else:
nbins = 10 ** numpy.linspace(numpy.log10(globalMinV), numpy.log10(globalMaxV), histBins)
xscale = 'log'
else:
nbins = numpy.linspace(globalMinV, globalMaxV, histBins)
xscale = 'linear'
for i in range(len(compounds)):
if compounds[i] in dataset.featureMetadata['Feature Name'].values:
featureIndex = dataset.featureMetadata.loc[dataset.featureMetadata['Feature Name'] == compounds[i]].index[0]
feature = dataset.featureMetadata.loc[dataset.featureMetadata['Feature Name'] == compounds[i]]
values = dataset.intensityData[:,featureIndex]
if logx:
values = values + offset
if {'Upper Reference Bound', 'Upper Reference Value', 'Lower Reference Bound', 'Lower Reference Value'}.issubset(feature.columns):
minV = feature['Lower Reference Value'].values[0]
maxV = feature['Upper Reference Value'].values[0]
# Interpret '-' as no lower bound, i.e. 0
if minV == '-':
minV = 0
if numpy.isfinite(minV) and numpy.isfinite(maxV):  # target API: numpy.isfinite; trailing colon assumed, snippet truncated in source
from math import *
import numpy as np
from scipy.special import hyp2f1
from scipy.integrate import quad,romberg,fixed_quad,quad_vec
from Distances import Distances
from Sersic import Sersic
from onedimLens import lens_stat1D
import matplotlib.pyplot as plt
from scipy.optimize import bisect
class lens_stat_gnfw():
def __init__(self,z0=0,z1=.3,z2=1.5,M200=1e13,Mstar=10**11.5,c=5,Re=3,m=4,Om=.25,Or=8.4e-5,Ol=.75,H0=70,ratio=1,cratio=1,alpha=1,source_mag=25.,galaxy=True,get_rho=False):
self.statt = lens_stat1D(z0=z0,z1=z1,z2=z2,M200=M200,Mstar=Mstar*ratio,
c=c*cratio,Re=Re,m=m,Om=Om,Or=Or,Ol=Ol,H0=H0,galaxy=galaxy)
self.alpha = alpha # power law index of the gnfw profile
self.z0 = 0
self.z1 = z1
self.z2 = z2
self.mag_unlensed = source_mag
self.H0 = H0/3.086e19 #convert to s^-1
self.h = H0/100
self.d = Distances(z0=z0,z1=z1,z2=z2,Om=Om,Ol=Ol,Or=Or,H0=H0)
self.Ez = np.sqrt(Or*(1+z1)**4+Om*(1+z1)**3+Ol)
self.Hz = self.H0*self.Ez
G = 6.67e-11 # m^3 kg^-1 s^-2
self.c = c*cratio #concentration parameter
self.rhoz = 3*self.Hz**2/(8*pi*G)/1.989e30/(3.24e-23)**3 # M_sun * Mpc^(-3)
self.rhos = 200/3*self.rhoz*(3-alpha)*c**(alpha)/hyp2f1(3-alpha,3-alpha,4-alpha,-c)
self.M200 = M200*ratio
self.r200 = (self.M200/(4/3*pi*200*self.rhoz))**(1/3) #Mpc
self.rs = self.r200/c #Mpc
self.galaxy = Sersic(Mstar=Mstar,Re=Re,m=m)
self.apply_galaxy=galaxy
self.Da1 = self.d.angular_diameter_distance(self.z0,z1) #ang distance to the lens
self.Da2 = self.d.angular_diameter_distance(self.z0,z2) #ang distance to the source
self.thetas = self.rs/self.Da1 #in radians
self.Sigmacr = self.d.get_Sigmacr()
self.kappas = self.rhos*self.rs/self.Sigmacr
self.b = 4*self.rhos*self.rs/self.Sigmacr
if get_rho == False:
self.xval_min,self.caus1,self.beta_mag = self.get_xval_min()
def rho(self,r):
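# Generalised NFW density profile implemented below:
#     rho(r) = rho_s / ((r/r_s)**alpha * (1 + r/r_s)**(3 - alpha))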
alpha = self.alpha
return self.rhos/((r/self.rs)**(alpha)*(1+(r/self.rs))**(3-alpha))
def Sigma(self,r):
x = r/self.rs
return 2*self.rhos*self.rs*x**(1-self.alpha)*quad_vec(lambda t:np.sin(t)*(np.sin(t)  # target API: numpy.sin; integrand truncated in the source snippet
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 19 15:05:11 2018
@author: gcgibson
"""
import sys
import numpy as np
import numpy.matlib as nm
from svgd_original import SVGD
def dln_gaussian_mean(x,mean,var):
return (1.0/var)*(x - mean )
def dln_binom(x,mean):
##dbinom x-mean (mean,1-e^-t)
return np.log(1 - np.exp(-.5)) - np.log(np.exp(-.5))  # np.pow does not exist; e**(-0.5) is np.exp(-.5)
def dln_observation_density(x,mean):
return dln_gaussian_mean(x,mean,1)
def dln_transition_density(x,mean):
return dln_binom(x,mean)
def dln_prior(x):
return dln_gaussian_mean(x,0,1)
class SSM:
def __init__(self, time_series):
self.time_series = time_series
def dlnprob(self, theta):
lambda_ = 3
theta = theta.reshape((300,3,20))
theta_new = []
for theta_i in theta:
tmp_p = []
tmp_p.append(dln_prior(theta_i[0])+dln_observation_density(self.time_series[0],theta_i[0]))
for i in range(1,len(self.time_series)):
tmp_p.append(dln_observation_density(self.time_series[i],theta_i[i]) + dln_transition_density(theta_i[i],theta_i[i-1]))
theta_new.append(tmp_p)
return theta_new
if __name__ == '__main__':
#time_series =np.round(np.power(np.sin(np.arange(100)+1),2)*10 + 10)
with open("/Users/gcgibson/Stein-Variational-Gradient-Descent/python/dat.json") as f:
dat = f.read()
dat = dat.split(",")
time = []
cases = []
count = 0
for elm in dat:
if count % 2 ==0:
time.append(elm.split(":")[1])
else:
cases.append(int(elm.split(":")[1].replace("}","").replace(']"]\n',"")))
count +=1
time_series = np.array(cases)  # target API: numpy.array
import numpy as np
from sklearn import linear_model, svm, metrics
class OrdinalRidge(linear_model.Ridge):
"""
Overwrite Ridge from scikit-learn to use
the (minus) squared error as score function.
(see https://github.com/scikit-learn/scikit-learn/issues/3848
on why this cannot be accomplished using a GridSearchCV object)
"""
def fit(self, X, y, **fit_params):
self.unique_y_ = np.unique(y)  # target API: numpy.unique
__author__ = '<NAME>'
import numpy as np
import os
import cv2
from matplotlib import pyplot as plt
def myFullTensor(myDim):
if myDim.__class__ == tuple:
return np.ones(myDim) * 7.5
else:
return None
def myFilesList(myFolder=None):
if myFolder == None or not(os.path.exists(myFolder)):
myFolder = os.getcwd()
contentList = os.listdir(myFolder)
fileList = [x for x in contentList if os.path.isfile(os.path.join(myFolder, x))]
return fileList
def myImageMean(img):
if img.__class__ != np.ndarray:
return None
else:
return np.mean(np.float64(img))
def myCommonTone(img):
if img.__class__ != np.ndarray:
return None
else:
myHist = np.zeros(256)
for i in range(256):
myHist[i] = len(img[img == i])
return np.argmax(myHist)  # target API: numpy.argmax
import numpy as np
X = np.array([0.2918,
-0.3295,
0.8546,
-0.5962,
1.1042,
-0.5381,
1.1731,
-1.2153,
1.0419,
-1.5694,
1.1610,
-1.4891,
1.5439,
-1.3711,
1.6795,
-1.5151,
1.5884,
-1.6542,
1.4734,
-1.6288,
1.4955,
-1.4850,
1.4004,
-1.3038,
1.3394,
-1.2050,
1.0921,
-1.0800,
0.9654,
-0.8946,
0.8220,
-0.7448,
0.6380,
-0.5272,
0.5125,
-0.4434,
0.3662,
-0.3331,
0.2555,
-0.1915,
0.1676,
-0.1586,
0.1043,
-0.1036,
0.0759,
-0.0677])
X = np.array([X]).T
import pandas as pd
import matplotlib.pyplot as plt
signal = pd.read_csv('IUTSUM00BHZ.csv')
signal= signal.values
signal = np.diff(signal[:,0])  # target API: numpy.diff
import pandas as pd
import numpy as np
import json
import re
import copy
import itertools
import math
import re, string
from collections import OrderedDict, defaultdict
from quantipy.core.helpers.constants import DTYPE_MAP
from quantipy.core.helpers.constants import MAPPED_PATTERN
from itertools import product, combinations
from scipy.stats.stats import _ttest_finish as get_pval
from operator import add, sub, mul, truediv
from quantipy.core.view import View
from quantipy.core.view_generators.view_mapper import ViewMapper
from quantipy.core.helpers import functions
from quantipy.core.tools.view import struct
import quantipy.core.tools.dp.prep
def describe(data, x, weights=None):
''' Replacement of (wrapper around) the df.describe() method that can deal with
weighted data. Weight vectors are allowed to be non-normalized, i.e.
sum of weights <> number of cases in sample. Quartile information currently
dropped from output, variance is unbiased variance.
Calculations are identical to SPSS Statistics/Professional.
'''
data = data.copy().dropna(subset=[x])
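# Weighted statistics computed below (restated from the code, not an external reference):
#     count      = sum_i w_i
#     eff. count = (sum_i w_i)**2 / sum_i w_i**2
#     mean       = sum_i w_i * x_i / sum_i w_i   (via the norm_wvector_coef rescaling)
#     variance   = sum_i w_i * (x_i - mean)**2 / (sum_i w_i - 1)   (unbiased), stdDev = sqrt(variance)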
desc_df = data[x].describe()
desc_df.rename(
{
'std': 'stdDev',
'25%': 'lower quartile',
'50%': 'median',
'75%': 'upper quartile'
},
inplace=True
)
# percentile information (incorrect for weighted data!) excluded for now...
# desc_df.drop(['Lower quartile', 'Median', 'Upper quartile'], inplace=True)
if not len(data.index) == 0:
if not weights == '@1':
count = data[weights].sum()
norm_wvector_coef = 1 if len(data.index) == count else len(data.index)/count
w_squared_sum = (data[weights]**2).sum()
eff_count = count**2/w_squared_sum
mean = data[x].mul(data[weights].mul(norm_wvector_coef)).mean()
var = data[weights].mul((data[x].sub(mean))**2).sum()/(data[weights].sum()-1)
try:
stddev = math.sqrt(var)
if abs(stddev) == 0.00:
stddev = np.NaN
except:
stddev = np.NaN
desc_df['count'] = count
desc_df['eff. count'] = eff_count
desc_df['weights squared sum'] = w_squared_sum
desc_df['mean'] = mean
desc_df['stdDev'] = stddev
else:
desc_df['eff. count'] = desc_df['count']
desc_df['weights squared sum'] = 1.00
desc_df['efficiency'] = desc_df['eff. count']/desc_df['count']*100
return pd.DataFrame(desc_df[['count', 'eff. count', 'min', 'max', 'mean', 'stdDev', 'weights squared sum', 'efficiency']])
def make_default_cat_view(link, weights=None):
'''
This function creates Quantipy's default categorical aggregations:
The x axis has to be a categorical single or multicode variable, the y axis
can be generated from either categorical (single or multicode) or numeric
(int/float). Numeric y axes are categorized into unique column codes.
Acts as a wrapper around _df_to_value_matrix(), _aggregate_matrix() and
set_qp_multiindex().
Parameters
----------
data : pd.DataFrame
x, y : str
Variable names from the processed case data input,
i.e. the link definition.
weighted : bool
Controls if the aggregation is performed on weighted or unweighted data.
Returns
-------
view_df : pd.Dataframe (multiindexed)
'''
mat, xdef, ydef = get_matrix(link, weights)
mat = weight_matrix(mat, xdef)
df = _default_cat_df(mat, xdef, ydef)
view_df = struct.set_qp_multiindex(df, link.x, link.y)
return view_df
def make_default_str_view(data, x, y=None):
df = pd.DataFrame({x: data[x]})
return df
def make_default_num_view(data, x, y=None, weights=None, drop=None, rescale=None, get_only=None):
'''
This function creates Quantipy's default numeric aggregations:
The x axis has to be a numeric variable of type int or float, the y axis
can be generated from either categorical (single or multicode) or numeric
(int/float) as well. Numeric y axes are categorized into unique column codes.
Acts as a wrapper around describe() and set_qp_multiindex().
Parameters
----------
data : pd.DataFrame
x, y : str
Variable names from the processed case data input,
i.e. the link definition.
weights : str
Controls if the aggregation is performed on weighted or unweighted data.
Returns
-------
view_df : pd.Dataframe (multiindexed)
'''
if not drop is None:
_exclude_codes(data[x], drop)
if not rescale is None:
_rescale_codes(data[x], rescale)
weight = weights if not weights is None else '@1'
if y is None or y == '@':
df = describe(data, x, weight)
df.columns = ['@']
else:
data = data[[x, y, weight]].copy().dropna()
if len(data.index) == 0:
df = describe(data, x, weight)
df.columns = ['None']
else:
# changing column naming for x==y aggregations
if not data.columns.is_unique:
data.columns = [x, y+'_', weight]
if data[y].dtype == 'object':
# for Quantipy multicoded data on the y axis
dummy_y = _cat_to_dummies(data[y], as_df=True)
dummy_y_data = pd.concat([data[[x, weight]], dummy_y], axis=1)
df = pd.concat(
[
describe(dummy_y_data[dummy_y_data[y_code] == 1], x, weight)
for y_code in dummy_y.columns
],
axis=1
)
df.columns = dummy_y.columns
else:
y_codes = sorted(data[y].unique())
df = pd.concat(
[
describe(data[data[y] == y_code], x, weight)
for y_code in y_codes
],
axis=1
)
df.columns = [
str(int(y_code)) if float(y_code).is_integer() else str(y_code)
for y_code in y_codes
]
if get_only is None:
df['All'] = describe(data, x, weight).values
c_margin = df.xs('count')
df = df.T
df['All'] = c_margin
df = df.T
view_df = struct.set_qp_multiindex(df, x, y)
return view_df
else:
return df.T[get_only].T
def calc_nets(casedata, link, source_view, combine_codes,
use_logic=False, force_raw_sum=False):
'''
Used to compute (categorical) net code figures from a given Quantipy link
definition, a reference view dataframe and a list of codes to build from.
If the link's aggregation x axis is single coded categorical type, the
calculation is a simple addition over the qualifying x codes. If x is type
multicode, the result is calculated using the value matrix approach
(as long force_raw_sum is not set to True).
See also:
- _cat_to_dummies(), _df_to_value_matrix(), _aggregate_matrix()
- make_default_cat_view()
Parameters
----------
link : Quantipy Link object
source_view : Quantipy View object
I.e. a count or pct aggregation
combine_codes : list of integers
The list of codes to combine.
force_raw_sum : bool, optional, default=False
Controls if the calculation is performed on raw source_view figures.
This effectively treats every categorical aggregation as single coded
and is useful when needing to calculate the total responses given
instead of effective qualifying answers.
Returns
-------
net_values : np.array
Stores the calculated net values
'''
if not use_logic and (not source_view.meta['x']['is_multi']
or force_raw_sum):
boolmask = [
int(index_val[1]) in combine_codes
for index_val in source_view.index
if not (
isinstance(index_val[1], str)
and index_val[1] == 'None'
)
]
if any(boolmask):
net_values = np.array(source_view[boolmask].values.sum(axis=0))
else:
net_values = np.zeros(link['default'].dataframe.shape[1]-1)
else:
if not link.y == '@':
matrix, xdef, ydef = get_matrix(
link, weights=source_view.meta['agg']['weights'], data=casedata)
matrix = weight_matrix(matrix, xdef)
matrix = missingfy_matrix(matrix, xdef, combine_codes, keep=True)
ycodes = reversed(range(1, len(ydef)+1))
net_values = np.array([np.nansum(
matrix[:, [0]]*matrix[:, [-ycode]])
for ycode in ycodes])
else:
matrix, xdef, ydef = get_matrix(
link, weights=source_view.meta['agg']['weights'], data=casedata)
matrix = weight_matrix(matrix, xdef)
matrix = missingfy_matrix(matrix, xdef, combine_codes, keep=True)
net_values = np.nansum(matrix[:, [0]])
if net_values.size == 0:
net_values = np.zeros(link['default'].dataframe.shape[1]-1)
return net_values
def _exclude_codes(matrix, dropped):
'''
Used to drop columns from a numeric matrix representation
of a Link. This will prevent unwanted values from feeding into
the statistical calculations.
Parameters
----------
matrix : pd.DataFrame of dummy-transformed data.
As produced by _cat_to_dummies().
dropped: int or list of int (or floats)
The codes that should be dropped.
If a single code is passed, the function automatically converts it to a
one-element list.
Returns
-------
matrix : pd.DataFrame (modified)
'''
if not isinstance(dropped, list):
dropped = [dropped]
dropped = [code for code in dropped if code in matrix.columns]
return matrix.drop(dropped, axis=1, inplace=True)
def _rescale_codes(matrix, scaling):
'''
Used to orient statistical figures produced by numerical aggregation
on a new scale, e.g. to produce means and stddev that range between 0 and 100
instead of the original survey codes that might have been 1,2,3,4,5.
Parameters
----------
matrix : pd.DataFrame of dummy-transformed data.
As produced by _cat_to_dummies.
scaling: dict
A 1-on-1 mapping of old values to new values.
Returns
-------
data : pd.DataFrame (modified)
'''
return matrix.rename(columns=scaling, inplace=True)
def calc_pct(source, base):
return pd.DataFrame(np.divide(source.values, base.values)*100)
def get_default_num_stat(default_num_view, stat, drop_bases=True, as_df=True):
'''
Is used to extract a specific statistical figure from
a given numerical default aggregation.
Parameters
----------
default_num_view : Quantipy default view
(Numerical aggregation case)
stat : string
States the figure to extract.
drop_bases : boolean, optional, default = True
Controls if the base [= 'All'] column figure is excluded
as_df : boolean, optional, default = True
If True will only return as pd.DataFrame, otherwise as np.array.
Returns
-------
pd.DataFrame
OR
np.array
'''
df = struct._partition_view_df(default_num_view, values=False, data_only=True)
if drop_bases:
df = df.drop('All', axis=1).drop('All', axis=0)
df = df.T[[stat]].T
if as_df:
return df
else:
return df.values
def _aggregate_matrix(value_matrix, x_def, y_def, calc_bases=True, as_df=True):
'''
Uses a np.array containing dichotomous values and lists of column codes
to aggregate frequency tables (and bases if requested) to create basic categorical
aggregations of uni- or bivariate cell frequencies.
Parameters
----------
value_matrix : np.array with 1/0 coded values
I.e. as returned from qp.helpers.aggregation._df_to_value_matrix().
x_def, y_def : lists of column codes
calc_bases : bool, default=True
Controls if the output contains base calculations
for column, row and total base figures (cb, rb, tb).
as_df : bool, default=True
Controls if the output is returned as pd.DataFrame with regular axis indexing
(and base rows/columns ['All'] if requested).
Returns
-------
agg_df : pd.DataFrame of frequency and base figures
OR
tuple : freqs as np.array, list of base figures (column, row, total)
'''
# handling empty matrices
if np.size(value_matrix) == 0:
empty = True
freq, cb, rb, tb = np.zeros(1), np.zeros(1), np.zeros(1), np.zeros(1)
else:
empty = False
xcodes = len(x_def)+1
if not y_def is None:
# bivariate calculation (cross-tabulation)
ycodes = reversed(range(1, len(y_def)+1))
freq = np.array([
np.sum(
value_matrix[value_matrix[:, -ycode] == 1][:, 1:xcodes],
axis=0
)
for ycode in ycodes
])
if calc_bases:
ycodes = reversed(range(1, len(y_def)+1))
cb = np.array([
np.sum(
value_matrix[value_matrix[:, -ycode] == 1][:, [0]])
for ycode in ycodes
])
rb = np.sum(value_matrix[:, 1:xcodes], axis=0)
tb = np.sum(value_matrix[:, [0]], axis=0)
else:
# univariate calculation (frequency table)
freq = np.sum(value_matrix[:, 1:xcodes], axis=0)
if calc_bases:
cb = np.array(np.sum(value_matrix[:, :1]))  # target API: numpy.sum; closing parenthesis assumed, snippet truncated in source
##imports===============================================================================
from scipy.spatial.distance import cdist
import numpy as np
import os, sys,glob, copy, csv, time
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.colors import Normalize
from neuropixels.continuous_traces import get_channel_count, filtr
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.manifold import TSNE
from scipy.cluster.vq import kmeans2
##===============================================================================
##===============================================================================
#probe geometry, for the summary figure only
option234_xpositions = np.zeros((192,2))
option234_ypositions = np.zeros((192,2))
option234_positions = np.zeros((384,2))
option234_positions[:,0][::4] = 21
option234_positions[:,0][1::4] = 53
option234_positions[:,0][2::4] = 5
option234_positions[:,0][3::4] = 37
option234_positions[:,1] = np.floor(np.linspace(383,0,384)/2) * 20
##===============================================================================
##helper functions===============================================================================
def read_kilosort_params(filename):
f=open(filename)
params = {}
for line in list(f):
try:
params[line.split(' =')[0]]=line.split('= ')[1].replace('\r\n','')
except:
pass
return params
def read_cluster_groups_CSV(directory):
cluster_id = [];
if os.path.isfile(os.path.join(directory,'cluster_group.tsv')):
cluster_id = [row for row in csv.reader(open(os.path.join(directory,'cluster_group.tsv')))][0:];
else:
if os.path.isfile(os.path.join(directory,'cluster_groups.csv')):
cluster_id = [row for row in csv.reader(open(os.path.join(directory,'cluster_groups.csv')))][0:];
else:
print('could not find cluster groups csv or tsv')
return None
good=[];mua=[];unsorted=[]
for i in np.arange(1,np.shape(cluster_id)[0]):
if cluster_id[i][0].split('\t')[1] == 'good':#if it is a 'good' cluster by manual sort
good.append(cluster_id[i][0].split('\t')[0])
if cluster_id[i][0].split('\t')[1] == 'mua':#if it is an 'mua' cluster by manual sort
mua.append(cluster_id[i][0].split('\t')[0])
if cluster_id[i][0].split('\t')[1] == 'unsorted':#if it is an 'unsorted' cluster by manual sort
unsorted.append(cluster_id[i][0].split('\t')[0])
return (np.array(good).astype(int),np.array(mua).astype(int),np.array(unsorted).astype(int))
def count_unique(x):
values=[]
instances=[]
for v in np.unique(x):
values.extend([v])
instances.extend([len(np.where(np.array(x)==v)[0].flatten())])
return values, instances
def ismember(a, b):
bind = {}
for i, elt in enumerate(b):
if elt not in bind:
bind[elt] = i
return [bind.get(itm, None) for itm in a] # None can be replaced by any other "not in b"
def load_waveforms(data,channel,times,pre=0.5,post=1.5,channels=384,sampling_rate=30000):
#input can be a memory mapped file or a string specifying the file to memory map. if string, deletes the memory mapped object when done, for hygiene.
pre = pre * .001
post = post * .001
channel = int(channel)
channels = int(channels)
if type(data)==str:
mm = np.memmap(data, dtype=np.int16, mode='r')
else:
mm=data
waveforms=[]
for i in times:
start = int((i - pre) * sampling_rate) * int(channels)
temp = mm[start:start+int((pre+post)*sampling_rate*channels)][channel::channels]# - mm[start:start+int((pre+post)*sampling_rate*channels)][channel::channels][0]
temp = temp - temp[0]
waveforms.extend([temp * 0.195])
if type(data)==str:
del mm
return waveforms
def mean_waveform(rawdata,times,pre=0.5,post=1.5,channels=384,sampling_rate=30000):
mean_waveform = []#np.zeros(channels,int((pre+post)*.001)*sampling_rate*channels)
for i,ch in enumerate(np.linspace(0,channels-1,channels).astype(int)):
try:
w = load_waveforms(rawdata,ch,times,pre,post,channels,sampling_rate)
except:
w = np.zeros((len(times),int(((pre+post)/1000.)*sampling_rate)))
mean_waveform.append(np.mean(w,axis=0))#[i,:]=np.mean(w).flatten()
return mean_waveform
def probe_waveforms(rawdata,times,pre=0.5,post=1.5,channels=384,sampling_rate=30000):
probe_waveform = np.zeros((channels,int((pre+post)*.001*sampling_rate),len(times)))
for i,ch in enumerate(np.linspace(0,channels-1,channels).astype(int)):
try:
probe_waveform[i,:,:] = np.array(load_waveforms(rawdata,ch,times,pre,post,channels,sampling_rate)).T
probe_waveform[i,:,:] -= np.mean(probe_waveform[i,:,:])
except:
probe_waveform[i,:,:] = 0.#np.zeros((len(times),int(((pre+post)/1000.)*sampling_rate)))
return probe_waveform.T
def load_phy_template(path,site_positions = option234_positions,**kwargs):
# load spike data that has been manually sorted with the phy-template GUI
# the site_positions should contain coordinates of the channels in probe space. for example, in um on the face of the probe
# returns a dictionary of 'good' units, each of which includes:
# times: spike times, in seconds
# template: template used for matching
# ypos: y position on the probe, calculated from the template. requires an accurate site_positions. averages template from 100 spikes.
# xpos: x position on the probe, calculated from the template. requires an accurate site_positions. averages template from 100 spikes.
clusters = np.load(open(os.path.join(path,'spike_clusters.npy'), 'rb'))
spikes = np.load(open(os.path.join(path,'spike_times.npy'), 'rb'))
spike_templates = np.load(open(os.path.join(path,'spike_templates.npy'), 'rb'))
templates = np.load(open(os.path.join(path,'templates.npy'), 'rb'))
cluster_id = [];
#[cluster_id.append(row) for row in csv.reader(open(os.path.join(path,'cluster_group.tsv')))];
if os.path.isfile(os.path.join(path,'cluster_group.tsv')):
cluster_id = [row for row in csv.reader(open(os.path.join(path,'cluster_group.tsv')))][0:];
else:
if os.path.isfile(os.path.join(path,'cluster_groups.csv')):
cluster_id = [row for row in csv.reader(open(os.path.join(path,'cluster_groups.csv')))][0:];
else:
print('could not find cluster groups csv or tsv')
return None
if 'sampling_rate' in kwargs.keys():
samplingrate = kwargs['sampling_rate']
else:
samplingrate =30000.
print('no sampling rate specified, using default of 30kHz')
units = {}
for i in np.arange(1,np.shape(cluster_id)[0]):
if cluster_id[i][0].split('\t')[1] != 'noise' :#:or cluster_id[i][0].split('\t')[1] == 'unsorted' :#keep any cluster not labeled 'noise' by the manual sort
unit = int(cluster_id[i][0].split('\t')[0])
units[str(unit)] = {}
#get the unit spike times
units[str(unit)]['times'] = spikes[np.where(clusters==unit)]/samplingrate
units[str(unit)]['times'] = units[str(unit)]['times'].flatten()
#get the mean template used for this unit
all_templates = spike_templates[np.where(clusters==unit)].flatten()
n_templates_to_subsample = 100
random_subsample_of_templates = templates[all_templates[np.array(np.random.rand(n_templates_to_subsample)*all_templates.shape[0]).astype(int)]]
mean_template = np.mean(random_subsample_of_templates,axis=0)
units[str(unit)]['template'] = mean_template
#take a weighted average of the site_positions, where each channel's weight is the integrated absolute value of its template
#this gets us the x and y positions of the unit on the probe.
weights = np.zeros(site_positions.shape)
for channel in range(site_positions.shape[0]):
weights[channel,:]=np.trapz(np.abs(mean_template.T[channel,:]))
weights = weights/np.max(weights)
(xpos,ypos)=np.average(site_positions,axis=0,weights=weights)
units[str(unit)]['waveform_weights'] = weights
units[str(unit)]['xpos'] = xpos
units[str(unit)]['ypos'] = ypos - site_positions[-1][1]
return units
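# Minimal usage sketch for load_phy_template(), assuming a phy/kilosort output
# directory at a hypothetical path and the option234_positions array defined
# elsewhere in this module; it only shows how the returned dictionary is indexed.
def _example_load_phy_template(sort_dir='path/to/kilosort_output'):
    units = load_phy_template(sort_dir, site_positions=option234_positions)
    for unit_id, unit in units.items():
        # 'times' are in seconds; 'xpos'/'ypos' are positions on the probe face (um)
        print(unit_id, len(unit['times']), unit['xpos'], unit['ypos'])
    return units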
##===============================================================================
##===============================================================================
def ISIviolations(spikeTrain, refDur, minISI):
#modified from cortex-lab/sortingQuality GitHub by <NAME>.
isis = np.diff(spikeTrain)
nSpikes = len(spikeTrain)
numViolations = sum(isis<refDur)
violationTime = 2*nSpikes*(refDur-minISI)
totalRate = nSpikes/(spikeTrain[-1] - spikeTrain[0])
violationRate = numViolations/violationTime
fpRate = violationRate/totalRate
if fpRate > 1.:
fpRate = 1. # it is nonsense to have a rate > 1; a rate > 1 means the assumptions of this analysis are failing
return fpRate, numViolations
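# Minimal sketch of the ISI-violation estimate on a synthetic ~20 Hz Poisson
# spike train; refDur=0.0015 s and minISI=0 match the defaults used by
# isiViolations() below.
def _example_isi_violations():
    rng = np.random.RandomState(0)
    spike_train = np.cumsum(rng.exponential(0.05, size=1000))  # monotonically increasing times (s)
    fp_rate, n_violations = ISIviolations(spike_train, refDur=0.0015, minISI=0.)
    print('estimated FP rate: %.4f (%d violations)' % (fp_rate, n_violations))
    return fp_rate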
def isiViolations(directory,time_limits=None,tr=.0015,tc=0):
spike_clusters_path = os.path.join(directory,'spike_clusters.npy')
spike_templates_path = os.path.join(directory,'spike_templates.npy')
spike_times_path = os.path.join(directory,'spike_times.npy')
params_path = os.path.join(directory,'params.py')
if time_limits == None:
time_limits=[0,1e7]
print(' ')
print('loading data for ISI computation...')
if os.path.isfile(spike_clusters_path):
spike_clusters = np.load(spike_clusters_path)
else:
spike_clusters = np.load(spike_templates_path)
params = read_kilosort_params(params_path)
spike_times = np.load(spike_times_path) / float(params['sample_rate'])
print('computing ISI violations...')
cluster_IDs = np.unique(spike_clusters)
isiV = np.zeros(np.shape(cluster_IDs)[0])
for i,cluster_ID in enumerate(cluster_IDs):
all_spikes = spike_times[np.where(spike_clusters==cluster_ID)[0]].flatten()
if all_spikes[-1] < time_limits[0] or all_spikes[0] > time_limits[1]:
isiV[i] = np.nan
else:
spikes = all_spikes[np.where(all_spikes > time_limits[0])[0][0]:np.where(all_spikes < time_limits[1])[0][-1]]
if len(spikes) > 1:
(fp_rate, num_violations) = ISIviolations(spikes,tr,tc)
#print fp_rate
isiV[i] = fp_rate
n_spikes = len(spikes)
print('\rcluster '+str(cluster_ID)+': '+str(num_violations)+' violations ('+str(n_spikes)+' spikes), '+str(fp_rate)+' estimated FP rate',)
else:
isiV[i] = np.nan
return cluster_IDs,isiV
def cluster_signalToNoise(directory, filename, channels, time_limits=None,sigma=5.,number_to_average=250,plots=False):
spike_clusters_path = os.path.join(directory,'spike_clusters.npy')
spike_templates_path = os.path.join(directory,'spike_templates.npy')
spike_times_path = os.path.join(directory,'spike_times.npy')
params_path = os.path.join(directory,'params.py')
if time_limits == None:
time_limits=[0,1e7]
print(' ')
print('loading data for s/n computation...')
if os.path.isfile(spike_clusters_path):
spike_clusters = np.load(spike_clusters_path)
else:
spike_clusters = np.load(spike_templates_path)
try:
rawdata = np.memmap(os.path.join(directory,filename), dtype=np.int16, mode='r')
except:
print('could not load spike data. is the filename correct? (default: experiment1_100-0_0.dat)')
return None
spike_templates = np.load(spike_templates_path)
templates = np.load(os.path.join(directory,'templates.npy'))
params = read_kilosort_params(params_path)
spike_times = np.load(spike_times_path) / float(params['sample_rate'])
if channels == 'all':
channels = get_channel_count(directory)
site_positions=option234_positions[np.linspace(0,channels-1,channels).astype(int)]
data = load_phy_template(directory,site_positions)
print('computing Quian Quiroga signal/noise ratio...')
cluster_IDs = np.unique(spike_clusters)
sn_peak = np.zeros(np.shape(cluster_IDs)[0])
sn_mean = np.zeros(np.shape(cluster_IDs)[0])
#spike_amplitudes = []
sn_all=[]
for i,cluster_ID in enumerate(cluster_IDs):
print('\rcluster '+str(cluster_ID)+': '+str(i)+'/'+str(len(cluster_IDs)),)
#peak_y_channel = np.where(data[clusterID]['waveform_weights'] == np.max(data[clusterID]['waveform_weights']))[0][0]
all_spikes = spike_times[np.where(spike_clusters==cluster_ID)[0]].flatten()
if all_spikes[-1] < time_limits[0] or all_spikes[0] > time_limits[1] or str(cluster_ID) not in data.keys():
sn_peak[i] = np.nan
sn_mean[i] = np.nan
sn_all.append([np.nan])
else:
spikes = all_spikes[np.where(all_spikes > time_limits[0])[0][0]:np.where(all_spikes < time_limits[1])[0][-1]]
channels_with_template = [ch for ch,t in enumerate(data[str(cluster_ID)]['waveform_weights']) if np.max(np.abs(t))>0.1] # <----- keep channels whose normalized template weight exceeds 0.1
if len(spikes) > number_to_average:
sub_times = np.random.choice(spikes,number_to_average,replace=False)
else:
sub_times = spikes
random_times = np.random.rand(number_to_average) * (np.max(spike_times)-np.min(spike_times)) + np.min(spike_times)
s_ns = []
if plots:
plt.figure()
for channel in channels_with_template:
ws = load_waveforms(os.path.join(directory,filename),channel,sub_times,channels=channels)
rs = load_waveforms(os.path.join(directory,filename),channel,random_times,channels=channels)
mean = np.mean(ws,axis=0)
# if time_limits[1]==1e7:
# cunk = rawdata[int(650*int(params['sample_rate'].strip('.'))*channels):int(660*int(params['sample_rate'].strip('.'))*channels)][channel::channels]
# else:
# cunk = rawdata[int(time_limits[0]*int(params['sample_rate'].strip('.'))*channels):int(time_limits[1]*int(params['sample_rate'].strip('.'))*channels)][channel::channels]
# channel_chunk = filtr(cunk,300,6000,float(params['sample_rate']),3) * 0.195 # <-- re-filter and put into uV
#noise = sigma * np.median(np.abs(channel_chunk)/0.6725)
noise = sigma * np.median(np.abs(np.array(rs))/0.6745) # <--- robust noise estimate, median(|x|)/0.6745, as defined by <NAME> 2004
signal = np.max(np.abs(mean)) # <-- convert to uV
# if signal/noise > np.max(s_ns):
# amplitudes = [np.max(np.abs(w)) for w in np.array(waveforms).T]
s_ns.extend([signal/noise])
if plots:
plt.plot(mean)
if plots:
plt.title(str(np.max(s_ns)))
sn_peak[i] = np.max(s_ns)
sn_mean[i] = np.mean(s_ns)
sn_all.append(s_ns)
#spike_amplitudes.append(amplitudes)
print('\rcluster '+str(cluster_ID)+': '+str(np.max(s_ns))+' sn',)
return cluster_IDs, sn_peak, sn_mean, sn_all# spike_amplitudes,sn_all
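# Illustration of the robust noise term used above: sigma_noise = median(|x|) / 0.6745
# (the Quiroga 2004 estimator), multiplied by the same `sigma` factor as in
# cluster_signalToNoise(). On Gaussian samples it should land near sigma * true_std.
def _example_quiroga_noise(sigma=5., true_std=10., n=30000):
    rng = np.random.RandomState(0)
    x = rng.normal(0., true_std, size=n)
    return sigma * np.median(np.abs(x) / 0.6745)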
def masked_cluster_quality(directory,time_limits=None,n_fet=3,minimum_number_of_spikes=10):
pc_features_path = os.path.join(directory,'pc_features.npy')
pc_features_ind_path = os.path.join(directory,'pc_feature_ind.npy')
spike_clusters_path = os.path.join(directory,'spike_clusters.npy')
spike_templates_path = os.path.join(directory,'spike_templates.npy')
spike_times_path = os.path.join(directory,'spike_times.npy')
params_path = os.path.join(directory,'params.py')
params = read_kilosort_params(params_path)
try:
pc_features = np.load(pc_features_path)
except:
print('loading PC features failed.')
return None
pc_feature_ind = np.load(pc_features_ind_path)
if os.path.isfile(spike_clusters_path):
print('building features matrix from clusters / templates')
spike_clusters = np.load(spike_clusters_path)
spike_templates = np.load(spike_templates_path)
spike_times = np.load(spike_times_path) / float(params['sample_rate'])
cluster_IDs = np.unique(spike_clusters)
n_clusters = len(cluster_IDs)
n_spikes = len(spike_clusters)
n_fet_per_chan = np.shape(pc_features)[1]
n_templates = np.shape(pc_feature_ind)[0]
new_fet = np.zeros((n_spikes,n_fet_per_chan,n_fet))
new_fet_inds = np.zeros((n_clusters,n_fet))
if time_limits == None:
time_limits=[0,1e7]
else:
print('spike_clusters do not exist, using spike_templates instead')
spike_clusters = spike_templates = np.load(spike_templates_path)
spike_times = np.load(spike_times_path) / float(params['sample_rate'])
print('computing cluster qualities...')
(cluster_IDs,unit_quality,contamination_rate,flda) = masked_cluster_quality_sparse(spike_clusters,pc_features,pc_feature_ind,spike_times,spike_templates,time_limits)
return cluster_IDs,unit_quality,contamination_rate, flda
def masked_cluster_quality_sparse(spike_clusters,pc_features,pc_feature_ind,spike_times,spike_templates,time_limits=None,n_fet=3,fet_n_chans=6):
fet_N = np.shape(pc_features)[1] * fet_n_chans
N = len(spike_clusters)
cluster_IDs = np.unique(spike_clusters)
unit_quality = np.zeros(len(cluster_IDs))
contamination_rate = np.zeros(len(cluster_IDs))
flda = np.zeros(len(cluster_IDs))
print('number of clusters: '+str(np.shape(unit_quality)[0]))
if time_limits == None:
time_limits=[0,1e7]
for i,cluster_ID in enumerate(cluster_IDs):
all_spikes = spike_times[np.where(spike_clusters==cluster_ID)[0]].flatten()
if all_spikes[-1] < time_limits[0] or all_spikes[0] > time_limits[1]:
unit_quality[i] = 0;
contamination_rate[i] =np.nan;
else:
these_spikes = np.where(spike_clusters==cluster_ID)[0][np.where(all_spikes > time_limits[0])[0][0]:np.where(all_spikes < time_limits[1])[0][-1]+1]
n_spikes_in_cluster = len(these_spikes)
if n_spikes_in_cluster < fet_n_chans or n_spikes_in_cluster > N/2.:
unit_quality[i] = 0;
contamination_rate[i] =np.nan;
else:
fet_this_cluster = pc_features[these_spikes,:n_fet,:fet_n_chans]#.reshape((len(these_spikes),-1))
#print 'this cluster PCs: '+str(np.shape(fet_this_cluster))
these_templates = spike_templates[these_spikes]
#count the templates in this unit and their frequency of occurrence:
(included_templates,instances) = count_unique(these_templates)
#use the template that occurs most frequently:
this_template = included_templates[np.where(instances==np.max(instances))[0][0]]
this_cluster_chans = pc_feature_ind[this_template,:fet_n_chans]
other_clusters_IDs = []
fet_other_clusters = []
for ii,cluster_2 in enumerate(cluster_IDs):
try:
if cluster_2 != cluster_ID:
all_spikes_2 = spike_times[np.where(spike_clusters==cluster_2)[0]].flatten()
these_spikes_2 = np.where(spike_clusters==cluster_2)[0][np.where(all_spikes_2 > time_limits[0])[0][0]:np.where(all_spikes_2 < time_limits[1])[0][-1]+1]
these_templates_2 = spike_templates[these_spikes_2]
#count the templates in this unit and their frequency of occurrence:
(included_templates_2,instances_2) = count_unique(these_templates_2)
#use the template that occurs most frequently:
this_template_2 = included_templates_2[np.where(instances_2==np.max(instances_2))[0][0]]
#get the channels that have signal in this template
cluster_2_chans = pc_feature_ind[this_template_2]
if np.any(np.in1d(this_cluster_chans,cluster_2_chans)): #<---continue only if the second cluster has an overlapping channel with the main cluster
#spikes_other_clusters.extend(these_spikes_2)
fet_cluster2 = np.zeros((np.shape(these_spikes_2)[0],n_fet,fet_n_chans)) #<---make an empty matrix with dimensions: [number of spikes,number of features, number of channels]
#get the pca values, for each channel and each feature, that overlap with the main cluster in question
for iii,ch in enumerate(this_cluster_chans):
if np.in1d(this_cluster_chans,cluster_2_chans)[iii]:
fet_cluster2[:,:,iii] = pc_features[these_spikes_2,:n_fet,np.where(ch==cluster_2_chans)[0][0]]
fet_other_clusters.extend(fet_cluster2)
except:
pass
#reshape to the arrays to dimensions: [number of spikes, (number of features * number of channels)]
try:
fet_other_clusters = np.array(fet_other_clusters).reshape((np.shape(fet_other_clusters)[0],-1))
fet_this_cluster = fet_this_cluster.reshape((len(these_spikes),-1))
except ValueError:
pass
#pass the features to core to calculate isolation distance and mahalanobis contamination
unit_quality[i],contamination_rate[i] = masked_cluster_quality_core(fet_this_cluster,fet_other_clusters,plots=False)
#pass the features to flda to calculate the d prime of this cluster from all other spikes, using Fisher's LDA
try:
flda[i] = masked_cluster_quality_flda(fet_this_cluster,fet_other_clusters,plots=False)
except ValueError:
pass
#pass spike times to neighbor similarity to measure the distance between
#neighbor_similarity[i],spike_similarity = masked_waveform_similarity(these_spikes,spikes_other_clusters,number_to_average=250,metric='euclidean')
#plans for the future:
#l_ratio[i] = masked_cluster_quality_lratio(fet_this_cluster,fet_other_clusters,plots=False)
print('\rcluster '+str(cluster_ID)+': # spikes:'+str(np.shape(all_spikes)[0])+' iso. distance:'+str(unit_quality[i])+' contamination:'+str(contamination_rate[i])+' '+str(i+1)+'/'+str(np.shape(unit_quality)[0]),)
return cluster_IDs,unit_quality,contamination_rate,flda
# def masked_waveform_similarity(directory,number_to_average=250,metric='euclidean'):
# params_path = os.path.join(directory,'params.py')
# filename = read_kilosort_params(params_path)['dat_path'][1:-1]
#
# if len(spikes) > number_to_average:
# sub_times = np.random.choice(spikes,number_to_average,replace=False)
# else:
# sub_times = spikes
#
# random_times = np.random.rand(number_to_average) * (np.max(spike_times)-np.min(spike_times)) + np.min(spike_times)
#
# for channel in channels_with_template:
# ws = load_waveforms(os.path.join(directory,filename),channel,sub_times,channels=channels)
#
# return neighbor_similarity_dprime,spike_by_spike_similarity
def masked_cluster_quality_flda(fet_this_cluster,fet_other_clusters,plots=False):
#based on documentation here: http://sebastianraschka.com/Articles/2014_python_lda.html
flda =LDA(n_components=1)
X = np.concatenate((fet_this_cluster,
fet_other_clusters))
y = np.concatenate((np.zeros(np.shape(fet_this_cluster)[0]),
np.ones(np.shape(fet_other_clusters)[0])))
X_flda = flda.fit_transform(X, y)
flda_this_cluster = X_flda[:np.shape(fet_this_cluster)[0]]
flda_other_cluster = X_flda[np.shape(fet_this_cluster)[0]:]
dprime = (np.mean(flda_this_cluster) - np.mean(flda_other_cluster))/np.sqrt(0.5*(np.std(flda_this_cluster)**2+np.std(flda_other_cluster)**2))
return dprime
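# Minimal sketch of the Fisher-LDA d-prime on two synthetic Gaussian feature
# clouds; LDA here is whatever class was imported under that name earlier in
# this module (assumed to behave like sklearn's LinearDiscriminantAnalysis).
def _example_flda_dprime():
    rng = np.random.RandomState(0)
    fet_this = rng.normal(0., 1., size=(500, 18))    # "this cluster" features
    fet_other = rng.normal(2., 1., size=(800, 18))   # "all other spikes" features
    return masked_cluster_quality_flda(fet_this, fet_other)  # larger |d'| = better separated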
def masked_cluster_quality_core(fet_this_cluster,fet_other_clusters,point_limit=20000000,plots=False):
n = np.shape(fet_this_cluster)[0]
n_other = np.shape(fet_other_clusters)[0]
n_fet = np.shape(fet_this_cluster)[1]
if n_other > n and n > n_fet:
if n > point_limit:
random_indices = np.random.choice(n,point_limit,replace=False)
fet_this_cluster = fet_this_cluster[random_indices,:]
if n_other > point_limit:
random_indices = np.random.choice(n_other,point_limit,replace=False)
fet_other_clusters = fet_other_clusters[random_indices,:]
md = np.sort(cdist(fet_this_cluster.mean(0).reshape(1,fet_this_cluster.shape[1]),
fet_other_clusters,
'mahalanobis')[0])
md_self = np.sort(cdist(fet_this_cluster.mean(0).reshape(1,fet_this_cluster.shape[1]),
fet_this_cluster,
'mahalanobis')[0])
#print fet_this_cluster.mean(0).reshape(1,fet_this_cluster.shape[1])
if plots:
plt.figure()
plt.plot(fet_this_cluster[:,0],fet_this_cluster[:,1],'r.')
plt.plot(fet_other_clusters[:n,0],fet_other_clusters[:n,1],'b.')
plt.plot(fet_this_cluster.mean(0).reshape(1,fet_this_cluster.shape[1])[0][0],fet_this_cluster.mean(0).reshape(1,fet_this_cluster.shape[1])[0][1],'*',color='#ffcccc',ms=12)
#
plt.figure()
plt.hist(md[:n],bins=100,range=(0,10),color='b')
plt.hist(md_self,bins=100,range=(0,10),color='r')
plt.title('iso: '+str(np.max(md[:n])))
unit_quality = np.max(md[:n])
contamination_rate = 1 - (tipping_point(md_self,md) / float(len(md_self)))
else:
unit_quality = 0
contamination_rate = np.nan
return unit_quality, contamination_rate
def tipping_point(x,y):
# Input: x, y are sorted ascending arrays of positive numbers
# Output: minimal pos s.t. sum(x > x(pos)) <= sum(y < x(pos))
#original:
# algorithm here is to sort x and y together, and determine the indices of
# x in this sorted list (call this xInds). Then, xInds-(1:length(xInds))
# will be the number of y's that are less than that value of x.
# translated from matlab (by <NAME>) to python:
# algorithm here is to sort x and y together, and determine how many y are less than x
# in the first len(x) instances of the sorted together array.
#pos = [ind for ind,together in enumerate(np.sort(np.array(zip(x,y)).flatten())) if np.any(together==x)][:len(x)][-1]-len(x)
#pos = np.where(np.array(np.sort(np.concatenate((x,y)).flatten()))[:len(x)] == np.sort(x)[-1])[0][0] - len(x)
#print str(np.where(np.array(np.sort(np.array(zip(x,y)).flatten()))== np.sort(x)[-1])[0][0])+' furthest self: '+str(x[-1])+' closest other:'+str(y[0])
pos = sum(np.in1d(np.array(np.sort(np.concatenate((x,y)).flatten()))[:len(x)],np.sort(x)))
return pos
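# Worked example of tipping_point(): x holds a cluster's own mahalanobis
# distances and y the other-spike distances (both sorted ascending); the
# function counts how many of the len(x) smallest pooled distances come from x,
# which masked_cluster_quality_core() turns into a contamination estimate.
def _example_tipping_point():
    x = np.array([0.5, 1.0, 1.5, 2.0])  # self distances
    y = np.array([1.2, 1.8, 2.5, 3.0])  # other-cluster distances
    return tipping_point(x, y)          # -> 3 (one "other" spike intrudes among the 4 closest)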
##===============================================================================
##plotting functions
##===============================================================================
def placeAxesOnGrid(fig,dim=[1,1],xspan=[0,1],yspan=[0,1],wspace=None,hspace=None,):
'''
Takes a figure with a gridspec defined and places an array of sub-axes on a portion of the gridspec
Takes as arguments:
fig: figure handle - required
dim: number of rows and columns in the subaxes - defaults to 1x1
xspan: fraction of figure that the subaxes subtends in the x-direction (0 = left edge, 1 = right edge)
yspan: fraction of figure that the subaxes subtends in the y-direction (0 = top edge, 1 = bottom edge)
wspace and hspace: white space between subaxes in the horizontal and vertical directions, respectively
returns:
subaxes handles
written by <NAME>
'''
import matplotlib.gridspec as gridspec
outer_grid = gridspec.GridSpec(100,100)
inner_grid = gridspec.GridSpecFromSubplotSpec(dim[0],dim[1],
subplot_spec=outer_grid[int(100*yspan[0]):int(100*yspan[1]),int(100*xspan[0]):int(100*xspan[1])],
wspace=wspace, hspace=hspace)
#NOTE: A cleaner way to do this is with list comprehension:
# inner_ax = [[0 for ii in range(dim[1])] for ii in range(dim[0])]
inner_ax = dim[0]*[dim[1]*[fig]] #filling the list with figure objects prevents an error when they are later replaced by axis handles
inner_ax = np.array(inner_ax)
idx = 0
for row in range(dim[0]):
for col in range(dim[1]):
inner_ax[row][col] = plt.Subplot(fig, inner_grid[idx])
fig.add_subplot(inner_ax[row,col])
idx += 1
inner_ax = np.array(inner_ax).squeeze().tolist() #remove redundant dimension
return inner_ax
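# Minimal usage sketch: a 2x1 stack of axes on the left half of a figure and a
# single axis on the right half, similar to how the neuron_fig functions below
# lay out their panels.
def _example_place_axes():
    fig = plt.figure(figsize=(8, 4))
    left_axes = placeAxesOnGrid(fig, dim=[2, 1], xspan=[0.0, 0.45], yspan=[0.0, 1.0], hspace=0.3)
    right_ax = placeAxesOnGrid(fig, xspan=[0.55, 1.0], yspan=[0.0, 1.0])
    left_axes[0].plot([0, 1], [0, 1])
    right_ax.hist(np.random.randn(200), bins=20)
    return fig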
def cleanAxes(ax,bottomLabels=False,leftLabels=False,rightLabels=False,topLabels=False,total=False):
ax.tick_params(axis='both',labelsize=10)
ax.spines['top'].set_visible(False);
ax.yaxis.set_ticks_position('left');
ax.spines['right'].set_visible(False);
ax.xaxis.set_ticks_position('bottom')
if not bottomLabels or topLabels:
ax.set_xticklabels([])
if not leftLabels or rightLabels:
ax.set_yticklabels([])
if rightLabels:
ax.spines['right'].set_visible(True);
ax.spines['left'].set_visible(False);
ax.yaxis.set_ticks_position('right');
if total:
ax.set_frame_on(False);
ax.set_xticklabels('',visible=False);
ax.set_xticks([]);
ax.set_yticklabels('',visible=False);
ax.set_yticks([])
def psth_line(times,triggers,pre=0.5,timeDomain=False,post=1,binsize=0.05,ymax=75,yoffset=0,output='fig',name='',color='#00cc00',linewidth=0.5,axes=None,labels=True,sparse=False,labelsize=18,axis_labelsize=20,error='',alpha=0.5,**kwargs):
post = post + 1
peris=[]#np.zeros(len(triggers),len(times))
p=[]
if timeDomain:
samplingRate = 1.0
else:
samplingRate = 30000. # assumes spike/trigger times are in samples at 30 kHz when timeDomain is False
times = np.array(times).astype(float) / samplingRate + pre
triggers = np.array(triggers).astype(float) / samplingRate
numbins = int((post+pre) / binsize)
bytrial = np.zeros((len(triggers),numbins))
for i,t in enumerate(triggers):
if len(np.where(times >= t - pre)[0]) > 0 and len(np.where(times >= t + post)[0]) > 0:
start = np.where(times >= t - pre)[0][0]
end = np.where(times >= t + post)[0][0]
for trial_spike in times[start:end-1]:
if float(trial_spike-t)/float(binsize) < float(numbins):
bytrial[i][int((trial_spike-t)/binsize)-1] +=1
else:
pass
#bytrial[i][:]=0
#print 'start: ' + str(start)+' end: ' + str(end)
variance = np.std(bytrial,axis=0)/binsize/np.sqrt((len(triggers)))
hist = np.mean(bytrial,axis=0)/binsize
edges = np.linspace(-pre,post,numbins)
if output == 'fig':
if error == 'shaded':
if 'shade_color' in kwargs.keys():
shade_color=kwargs['shade_color']
else:
shade_color=color
if axes == None:
plt.figure()
axes=plt.gca()
plt.locator_params(axis='y',nbins=4)
upper = hist+variance
lower = hist-variance
axes.fill_between(edges[2:-1],upper[2:-1]+yoffset,hist[2:-1]+yoffset,alpha=alpha,color='white',facecolor=shade_color)
axes.fill_between(edges[2:-1],hist[2:-1]+yoffset,lower[2:-1]+yoffset,alpha=alpha,color='white',facecolor=shade_color)
axes.plot(edges[2:-1],hist[2:-1]+yoffset,color=color,linewidth=linewidth)
axes.set_xlim(-pre,post-1)
axes.set_ylim(0,ymax);
if sparse:
axes.set_xticklabels([])
axes.set_yticklabels([])
else:
if labels:
axes.set_xlabel(r'$time \/ [s]$',fontsize=axis_labelsize)
axes.set_ylabel(r'$firing \/ rate \/ [Hz]$',fontsize=axis_labelsize)
axes.tick_params(axis='both',labelsize=labelsize)
axes.spines['top'].set_visible(False);axes.yaxis.set_ticks_position('left')
axes.spines['right'].set_visible(False);axes.xaxis.set_ticks_position('bottom')
axes.set_title(name,y=0.5)
return axes
else:
if axes == None:
plt.figure()
axes=plt.gca()
f=axes.errorbar(edges,hist,yerr=variance,color=color)
axes.set_xlim(-pre,post - 1)
if ymax=='auto':
pass
else:
axes.set_ylim(0,ymax)
if sparse:
axes.set_xticklabels([])
axes.set_yticklabels([])
else:
if labels:
axes.set_xlabel(r'$time \/ [s]$',fontsize=axis_labelsize)
axes.set_ylabel(r'$firing \/ rate \/ [Hz]$',fontsize=axis_labelsize)
axes.tick_params(axis='both',labelsize=labelsize)
axes.spines['top'].set_visible(False);axes.yaxis.set_ticks_position('left')
axes.spines['right'].set_visible(False);axes.xaxis.set_ticks_position('bottom')
axes.set_title(name)
return axes
if output == 'hist':
return (hist,edges)
if output == 'p':
return (edges,hist,variance)
def project_linear_quality(data,labels,plots=False):
flda =LDA(n_components=1)
if np.shape(data)[0] == np.shape(labels)[0]:
X_flda = flda.fit_transform(data, labels)
return X_flda
else:
print('dimensions of data do not match labels')
return None
def get_spike_amplitudes(datapath,channel,times,pre=0.5,post=1.5,channels=384,sampling_rate=30000):
pre = pre * .001
post = post * .001
mm = np.memmap(datapath, dtype=np.int16, mode='r')
amplitudes = []
channel = int(channel)
for i in times:
start = int((i - pre) * sampling_rate) * int(channels)
temp=mm[start:start+int((pre+post)*sampling_rate*channels)][channel::channels]
amplitudes.extend([np.max(np.abs(temp-temp[0]))])
return amplitudes
def get_PCs(datapath,channel,times,PC=1,pre=0.5,post=1.5,channels=384,sampling_rate=30000):
pre = pre * .001
post = post * .001
mm = np.memmap(datapath, dtype=np.int16, mode='r')
PCs = []
channel = int(channel)
for i in times:
start = int((i - pre) * sampling_rate) * int(channels)
window = mm[start:start+int((pre+post)*sampling_rate*channels)][channel::channels]
# NOTE: despite the name, this currently returns per-spike peak amplitudes;
# projection onto principal component `PC` is not implemented here.
PCs.extend([np.max(np.abs(window - window[0]))])
return PCs
# %%
def neuron_fig(clusterID,df,sortpath,filename='experiment1_102-0_0.dat',time_limits=None,timeplot_binsize=60.,neighbor_colors=["#67572e","#50a874","#ff4d4d"]):
channels = 175 #get_channel_count(sortpath)
site_positions=option234_positions[np.linspace(0,channels-1,channels).astype(int)]
data = load_phy_template(sortpath,site_positions)
cluster_ID = int(clusterID)
cluster_IDs = np.load(os.path.join(sortpath,'spike_clusters.npy'))
pc_data = np.load(os.path.join(sortpath,'pc_features.npy'))
pc_ind_data = np.load(os.path.join(sortpath,'pc_feature_ind.npy'))
params_path = os.path.join(sortpath,'params.py')
params = read_kilosort_params(params_path)
spike_times_data = np.load(os.path.join(sortpath,'spike_times.npy'))/ float(params['sample_rate'])
spike_templates = np.load(os.path.join(sortpath,'spike_templates.npy'))
datapath=os.path.join(sortpath,filename)
#df_line = df[df.clusterID==int(clusterID)]
fig = plt.figure(figsize=(11,8.5))
ax_text = placeAxesOnGrid(fig,xspan=[0.0,0.4],yspan=[0,0.1])
ax_position = placeAxesOnGrid(fig,xspan=[0,0.1],yspan=[0.12,1.0])
ax_waveform = placeAxesOnGrid(fig,xspan=[0.2,0.45],yspan=[0.12,0.65])
ax_time = placeAxesOnGrid(fig,xspan=[0.2,1.0],yspan=[0.82,1.0])
ax_PCs = placeAxesOnGrid(fig,xspan=[0.5,0.8],yspan=[0,0.35])
ax_ACG = placeAxesOnGrid(fig,dim=[1,2],xspan=[0.55,1.0],yspan=[0.5,0.7])
#ax_neighbor_waveform_1 = placeAxesOnGrid(fig,dim=[1,1],xspan=[0.82,1.0],yspan=[0,0.13])
#ax_neighbor_waveform_2 = placeAxesOnGrid(fig,dim=[1,1],xspan=[0.82,1.0],yspan=[0.13,0.26])
#ax_neighbor_waveform_3 = placeAxesOnGrid(fig,dim=[1,1],xspan=[0.82,1.0],yspan=[0.26,0.39])
ax_neighbor_waveforms=placeAxesOnGrid(fig,dim=[1,1],xspan=[0.82,1.0],yspan=[0.0,0.39])#[ax_neighbor_waveform_1,ax_neighbor_waveform_2,ax_neighbor_waveform_3]
ax_CCGs_1 = placeAxesOnGrid(fig,dim=[1,1],xspan=[0.53,0.68],yspan=[0.36,0.48])
ax_CCGs_2 = placeAxesOnGrid(fig,dim=[1,1],xspan=[0.7,.85],yspan=[0.36,0.48])
ax_CCGs_3 = placeAxesOnGrid(fig,dim=[1,1],xspan=[0.86,1.0],yspan=[0.36,0.48])
ax_CCGs = [ax_CCGs_1,ax_CCGs_2,ax_CCGs_3]
#position plot
ax_position.imshow(data[clusterID]['waveform_weights'][::4],extent=(site_positions[:,0][::4][0],site_positions[:,0][::4][0]+16,site_positions[:,1][::4][0],site_positions[:,1][::4][-1]),cmap=plt.cm.gray_r,clim=(0,0.5),interpolation='none')
ax_position.imshow(data[clusterID]['waveform_weights'][1::4],extent=(site_positions[:,0][1::4][0],site_positions[:,0][1::4][0]+16,site_positions[:,1][1::4][0],site_positions[:,1][1::4][-1]),cmap=plt.cm.gray_r,clim=(0,0.5),interpolation='none')
ax_position.imshow(data[clusterID]['waveform_weights'][2::4],extent=(site_positions[:,0][2::4][0],site_positions[:,0][2::4][0]+16,site_positions[:,1][2::4][0],site_positions[:,1][2::4][-1]),cmap=plt.cm.gray_r,clim=(0,0.5),interpolation='none')
ax_position.imshow(data[clusterID]['waveform_weights'][3::4],extent=(site_positions[:,0][3::4][0],site_positions[:,0][3::4][0]+16,site_positions[:,1][3::4][0],site_positions[:,1][3::4][-1]),cmap=plt.cm.gray_r,clim=(0,0.5),interpolation='none')
ax_position.set_aspect(0.1)
ax_position.set_ylim(3840,0)
ax_position.set_xlim(70,0)
cleanAxes(ax_position)
ax_position.set_title('neuron position')
#time limits
if time_limits == None:
time_limits=[0,1e7]
all_spikes = data[clusterID]['times']
spike_times = all_spikes[np.where(all_spikes > time_limits[0])[0][0]:np.where(all_spikes < time_limits[1])[0][-1]]
#print len(all_spikes)
#print all_spikes[0]
#print all_spikes[-1]
##print np.where(all_spikes > time_limits[0])[0][0]
#print np.where(all_spikes < time_limits[1])[0][-1]
these_spikes = np.where(cluster_IDs==cluster_ID)[0][np.where(all_spikes > time_limits[0])[0][0]:np.where(all_spikes < time_limits[1])[0][-1]]
#spike_times = spike_times_data[these_spikes]
#for PC and CCG display, find nearby clusters and calculate their cross-correlograms
#PC plot
number_of_spikes_to_plot = 5000
these_templates=spike_templates[np.where(cluster_IDs==cluster_ID)[0]]
(included_templates,instances) = count_unique(these_templates)
this_template = included_templates[int(np.where(instances==np.max(instances))[0])]
ch1 = pc_ind_data[this_template][0]
ch2 = pc_ind_data[this_template][1]
ax_PCs.plot(pc_data[these_spikes][:number_of_spikes_to_plot,0,0],
pc_data[these_spikes][:number_of_spikes_to_plot,0,1],'bo',ms=1.5,markeredgewidth=0)
nearby_trio = [0,0,0];
nearby_euclids = [10000,10000,10000];
nearby_times=[]
for other_cluster in data.keys():
if other_cluster != clusterID:
if (np.abs(data[clusterID]['ypos']-data[other_cluster]['ypos']) + np.abs(data[clusterID]['xpos']-data[other_cluster]['xpos'])) < nearby_euclids[-1]:
rank = np.where((np.abs(data[clusterID]['ypos']-data[other_cluster]['ypos']) + np.abs(data[clusterID]['xpos']-data[other_cluster]['xpos'])) < nearby_euclids)[0][0]
nearby_euclids[rank] = (np.abs(data[clusterID]['ypos']-data[other_cluster]['ypos']) + np.abs(data[clusterID]['xpos']-data[other_cluster]['xpos']))
nearby_trio[rank]=other_cluster
print(nearby_trio)
for ii,neighbor in enumerate(nearby_trio):
all_spikes_neighbor = data[neighbor]['times']
indices_neighbor= np.where(cluster_IDs==int(neighbor))[0][np.where(all_spikes_neighbor > time_limits[0])[0][0]:np.where(all_spikes_neighbor < time_limits[1])[0][-1]]
neighbor_templates=spike_templates[indices_neighbor]
neighbor_spike_times = all_spikes_neighbor[np.where(all_spikes_neighbor > time_limits[0])[0][0]:np.where(all_spikes_neighbor < time_limits[1])[0][-1]]
nearby_times.append(neighbor_spike_times)
(included_templates,instances) = count_unique(neighbor_templates)
this_template = included_templates[int(np.where(instances==np.max(instances))[0])]
ch1_index = np.where(pc_ind_data[this_template] == ch1)[0]
ch2_index = np.where(pc_ind_data[this_template] == ch2)[0]
if ch2_index.size != 0 and ch1_index.size != 0:
ax_PCs.plot(pc_data[indices_neighbor][:number_of_spikes_to_plot,0,np.where(pc_ind_data[this_template] == ch1)[0][0]],
pc_data[indices_neighbor][:number_of_spikes_to_plot,0,np.where(pc_ind_data[this_template] == ch2)[0][0]],
'o',color=neighbor_colors[ii],ms=1,markeredgewidth=0,alpha=0.8)
all_diffs = []
for spike_time in spike_times:
try:
neighbor_start = np.where(neighbor_spike_times < spike_time - 0.5)[0][-1]
except:
neighbor_start = 0
try:
neighbor_end = np.where(neighbor_spike_times > spike_time + 0.5)[0][0]
except:
neighbor_end = -1
neighbor_chunk = neighbor_spike_times[neighbor_start:neighbor_end]
#print '\r'+str(spike_time)+' '+str(neighbor_start)+' - '+str(neighbor_end),
all_diffs.extend(neighbor_chunk - spike_time)
all_diffs=np.array(all_diffs).flatten()
hist,edges = np.histogram(all_diffs,bins=np.linspace(-0.2,0.2,400))
ax_CCGs[ii].plot(edges[:-1],hist,drawstyle='steps',color=neighbor_colors[ii])
ax_CCGs[ii].set_xlim(-.02,.02)
ax_CCGs[ii].xaxis.set_ticks([-0.015,0.0,0.015])
ax_CCGs[ii].axvline(0.0,ymax=0.1,ls='--',color='#ff8080')
cleanAxes(ax_CCGs[ii],total=True)
cleanAxes(ax_neighbor_waveforms,total=True)
cleanAxes(ax_PCs)
ax_PCs.set_title('PC features')
#waveform plot
cleanAxes(ax_waveform,total=True)
ax_waveform.set_title('waveform')
channel_offset = 0 # should not be 0 if not starting from tip.
peak_y_channel = np.where(data[clusterID]['waveform_weights'] == np.max(data[clusterID]['waveform_weights']))[0][0]
times = np.random.choice(spike_times,100,replace=False)
random_times = np.random.rand(100) * (np.max(spike_times)-np.min(spike_times)) + np.min(spike_times)
xoffs=[0,100,50,150,0,100,50,150,0,100,50,150,0,100,50,150,0,100][::-1]
yoff=100
signal = 0;
for ii,channel in enumerate(np.linspace(peak_y_channel-8,peak_y_channel+10,18)):
if channel > 0:
ws = load_waveforms(datapath,channel,times,channels=channels)
ws_bkd = load_waveforms(datapath,channel,random_times,channels=channels)
x_range = np.linspace(xoffs[ii],xoffs[ii]+60,60)
for i in range(np.shape(ws)[0]):
if np.shape(x_range)[0] == np.shape(ws[i][:]-yoff*(ii/2))[0]:
ax_waveform.plot(x_range,ws[i][:]-yoff*(ii/2),alpha=0.05,color='#0066ff')
ax_waveform.plot(x_range,ws_bkd[i][:]-yoff*(ii/2),alpha=0.05,color='#c8c8c8')
if np.shape(x_range)[0] == np.shape(np.mean(ws,axis=0)-yoff*(ii/2))[0]:
if np.max(np.abs(np.mean(ws,axis=0)-yoff*(ii/2))) > signal:
signal = np.max(np.abs(np.mean(ws,axis=0)-yoff*(ii/2)))
noise = np.mean(ws_bkd) + np.std(ws_bkd)* 4.
ax_waveform.plot(x_range,np.mean(ws,axis=0)-yoff*(ii/2),color='#0066ff')
if ii > 3 and ii < 14:
for nn,axis in enumerate(nearby_trio):
if np.shape(x_range)[0] == np.shape(np.mean(ws,axis=0)-yoff*(ii/2))[0]:
neighbor_ws = load_waveforms(datapath,channel,np.random.choice(nearby_times[nn],100,replace=False),channels=channels)
ax_neighbor_waveforms.plot(x_range,np.mean(ws,axis=0)-(yoff/3.)*(ii/2),color='#0066ff')
ax_neighbor_waveforms.plot(x_range,np.mean(neighbor_ws,axis=0)-(yoff/3.)*(ii/2),color=neighbor_colors[nn],alpha=0.8)
#time plot
hist,edges = np.histogram(spike_times,bins=int(np.ceil(spike_times[-1] / timeplot_binsize)))
ax_time.plot(edges[1:],hist/float(timeplot_binsize),drawstyle='steps')
ax_time.set_xlabel('time (sec)')
ax_time.set_ylabel('firing rate (Hz)')
ax_time.set_title('firing rate over time')
max_spikes_to_plot = 2000
if len(spike_times) > max_spikes_to_plot:
times = np.random.choice(spike_times,max_spikes_to_plot,replace=False)
else:
times = spike_times
amps = get_spike_amplitudes(datapath,peak_y_channel,times,channels=channels)
ax_time_r = ax_time.twinx()
ax_time_r.plot(times,amps,'o',markersize=2,alpha=0.3)
ax_time_r.set_ylabel('amplitude')
ax_time_r.set_ylim(0,np.max(amps))
cleanAxes(ax_time,leftLabels=True,bottomLabels=True)
cleanAxes(ax_time_r,rightLabels=True,bottomLabels=True)
#for ACG display, calculate ISIs.
isis = np.diff(spike_times)
hist,edges = np.histogram(np.concatenate((isis,isis*-1)),bins=np.linspace(-0.5,0.5,100))
ax_ACG[0].plot(edges[:-1],hist,drawstyle='steps')
ax_ACG[0].set_xlim(-.5,.5)
ax_ACG[0].xaxis.set_ticks([-0.25,0.0,0.25])
ax_ACG[0].set_ylabel('spike count')
hist,edges = np.histogram(np.concatenate((isis,isis*-1)),bins=np.linspace(-0.02,0.02,160))
ax_ACG[1].plot(edges[:-1],hist,drawstyle='steps')
ax_ACG[1].set_xlim(-.02,.02)
ax_ACG[1].axvline(0.0015,ls='--',color='#ff8080');ax_ACG[1].axvline(-0.0015,ls='--',color='#ff8080')
ax_ACG[1].xaxis.set_ticks([-0.02,-0.01,0.0,0.01,0.02])
for axis in ax_ACG:
axis.set_xlabel('time (sec)')
#text info plot
cleanAxes(ax_text,total=True)
ax_text.text(0, 1, 'cluster: '+clusterID, fontsize=12,weight='bold')
#"%.2f" % signal
#ax_text.text(10, 30, 'amp.: '+"%.2f" % 1.0+'uV', fontsize=10)
#ax_text.text(10, 60, 'SNR: '+"%.2f" % float(df[df.clusterID==int(clusterID)].sn_max), fontsize=10)
#ax_text.text(50, 0, 'isolation distance: '+"%.2f" % float(df[df.clusterID==int(clusterID)].isolation_distance), fontsize=10)
#ax_text.text(50, 30, 'purity [mahalanobis]: '+"%.2f" % float(1-float(df[df.clusterID==int(clusterID)].mahalanobis_contamination)), fontsize=10)
#ax_text.text(50, 60, 'ISI violation rate: '+"%.2f" % float(1-float(df[df.clusterID==int(clusterID)].isi_purity)), fontsize=10)
ax_text.set_ylim(100,0)
ax_text.set_xlim(0,100)
return plt.gcf()
##===============================================================================
##===============================================================================
# %%
##===============================================================================
##===============================================================================
##===============================================================================
##===============================================================================
##===============================================================================
##OLD PLOTTING FUNCTIONS
##===============================================================================
##===============================================================================
##===============================================================================
##===============================================================================
def plot_quality(quality,isiV,labels):
(good,mua,unsorted) = labels
f,ax = plt.subplots(3,1,figsize=(4,6));
ax[0].plot(quality[1][np.where(np.in1d(quality[0],good))[0]],quality[2][np.where(np.in1d(quality[0],good))[0]],'o',label='good',mfc='g',mec='g');#plt.ylim(0,1)
ax[0].plot(quality[1][np.where(np.in1d(quality[0],mua))[0]],quality[2][np.where(np.in1d(quality[0],mua))[0]],'o',label='mua',mfc='r',mec='r');#plt.ylim(0,1)
ax[0].plot(quality[1][np.where(np.in1d(quality[0],unsorted))[0]],quality[2][np.where(np.in1d(quality[0],unsorted))[0]],'k.',label='unsorted',alpha=0.3);#plt.ylim(0,1)
ax[0].set_xlabel('iso distance');ax[0].set_ylabel('contamination from mahal.')
ax[0].set_ylim(ymin=0);ax[0].set_xlim(xmin=1.)
ax[0].set_xscale('log');
# if np.max(quality[1]) > 20:
# ax[0].set_xlim(1,20)
legend = ax[0].legend(loc='upper right', shadow=False, fontsize=10,numpoints=1)
ax[1].plot(quality[1][np.where(np.in1d(quality[0],good))[0]],isiV[1][np.where(np.in1d(isiV[0],good))[0]],'o',label='good',mfc='g',mec='g');plt.ylim(0,1)
ax[1].plot(quality[1][np.where(np.in1d(quality[0],mua))[0]],isiV[1][np.where(np.in1d(isiV[0],mua))[0]],'o',label='mua',mfc='r',mec='r');plt.ylim(0,1)
ax[1].plot(quality[1][np.where(np.in1d(quality[0],unsorted))[0]],isiV[1][np.where(np.in1d(isiV[0],unsorted))[0]],'k.',label='unsorted',alpha=0.3);plt.ylim(0,1)
ax[1].set_xlabel('iso distance');ax[1].set_ylabel('isi contamination')
ax[1].set_ylim(ymin=0);ax[1].set_xlim(xmin=1.)
ax[1].set_xscale('log');
# if np.max(quality[1]) > 20:
# ax[1].set_xlim(1,20)
legend = ax[1].legend(loc='upper right', shadow=False, fontsize=10,numpoints=1)
ax[2].plot(quality[2][np.where(np.in1d(quality[0],good))[0]],isiV[1][np.where(np.in1d(isiV[0],good))[0]],'o',label='good',mfc='g',mec='g');plt.ylim(0,1)
ax[2].plot(quality[2][np.where(np.in1d(quality[0],mua))[0]],isiV[1][np.where(np.in1d(isiV[0],mua))[0]],'o',label='mua',mfc='r',mec='r');plt.ylim(0,1)
ax[2].plot(quality[2][np.where(np.in1d(quality[0],unsorted))[0]],isiV[1][np.where(np.in1d(isiV[0],unsorted))[0]],'k.',label='unsorted',alpha=0.3);plt.ylim(0,1)
ax[2].set_ylabel('isi contamination');ax[2].set_xlabel('contamination from mahal.')
ax[2].set_ylim(ymin=0.01);ax[2].set_xlim(xmin=0.01)
ax[2].set_xscale('log');ax[2].set_yscale('log')
#legend = ax[2].legend(loc='upper left', shadow=False, fontsize=10,numpoints=1)
plt.tight_layout()
def neuron_fig2(clusterID,df,sortpath,filename='experiment1_102-0_0.dat',time_limits=None,timeplot_binsize=60.,neighbor_colors=["#67572e","#50a874","#ff4d4d"]):
channels = get_channel_count(sortpath)
site_positions=option234_positions[np.linspace(0,channels-1,channels).astype(int)]
data = load_phy_template(sortpath,site_positions)
cluster_ID = int(clusterID)
cluster_IDs = np.load(os.path.join(sortpath,'spike_clusters.npy'))
pc_data = np.load(os.path.join(sortpath,'pc_features.npy'))
pc_ind_data = np.load(os.path.join(sortpath,'pc_feature_ind.npy'))
params_path = os.path.join(sortpath,'params.py')
params = read_kilosort_params(params_path)
spike_times_data = np.load(os.path.join(sortpath,'spike_times.npy'))/ float(params['sample_rate'])
spike_templates = np.load(os.path.join(sortpath,'spike_templates.npy'))
datapath=os.path.join(sortpath,filename)
df_line = df[df.clusterID==int(clusterID)]
fig = plt.figure(figsize=(11,8.5))
ax_text = placeAxesOnGrid(fig,xspan=[0.0,0.4],yspan=[0,0.1])
ax_position = placeAxesOnGrid(fig,xspan=[0,0.1],yspan=[0.12,1.0])
ax_waveform = placeAxesOnGrid(fig,xspan=[0.2,0.45],yspan=[0.12,0.65])
ax_time = placeAxesOnGrid(fig,xspan=[0.2,1.0],yspan=[0.82,1.0])
ax_PCs = placeAxesOnGrid(fig,xspan=[0.5,0.8],yspan=[0,0.35])
ax_ACG = placeAxesOnGrid(fig,dim=[1,2],xspan=[0.55,1.0],yspan=[0.5,0.7])
#ax_neighbor_waveform_1 = placeAxesOnGrid(fig,dim=[1,1],xspan=[0.82,1.0],yspan=[0,0.13])
#ax_neighbor_waveform_2 = placeAxesOnGrid(fig,dim=[1,1],xspan=[0.82,1.0],yspan=[0.13,0.26])
#ax_neighbor_waveform_3 = placeAxesOnGrid(fig,dim=[1,1],xspan=[0.82,1.0],yspan=[0.26,0.39])
ax_neighbor_waveforms=placeAxesOnGrid(fig,dim=[1,1],xspan=[0.82,1.0],yspan=[0.0,0.39])#[ax_neighbor_waveform_1,ax_neighbor_waveform_2,ax_neighbor_waveform_3]
ax_CCGs_1 = placeAxesOnGrid(fig,dim=[1,1],xspan=[0.53,0.68],yspan=[0.36,0.48])
ax_CCGs_2 = placeAxesOnGrid(fig,dim=[1,1],xspan=[0.7,.85],yspan=[0.36,0.48])
ax_CCGs_3 = placeAxesOnGrid(fig,dim=[1,1],xspan=[0.86,1.0],yspan=[0.36,0.48])
ax_CCGs = [ax_CCGs_1,ax_CCGs_2,ax_CCGs_3]
#position plot
ax_position.imshow(data[clusterID]['waveform_weights'][::4],extent=(site_positions[:,0][::4][0],site_positions[:,0][::4][0]+16,site_positions[:,1][::4][0],site_positions[:,1][::4][-1]),cmap=plt.cm.gray_r,clim=(0,0.5),interpolation='none')
ax_position.imshow(data[clusterID]['waveform_weights'][1::4],extent=(site_positions[:,0][1::4][0],site_positions[:,0][1::4][0]+16,site_positions[:,1][1::4][0],site_positions[:,1][1::4][-1]),cmap=plt.cm.gray_r,clim=(0,0.5),interpolation='none')
ax_position.imshow(data[clusterID]['waveform_weights'][2::4],extent=(site_positions[:,0][2::4][0],site_positions[:,0][2::4][0]+16,site_positions[:,1][2::4][0],site_positions[:,1][2::4][-1]),cmap=plt.cm.gray_r,clim=(0,0.5),interpolation='none')
ax_position.imshow(data[clusterID]['waveform_weights'][3::4],extent=(site_positions[:,0][3::4][0],site_positions[:,0][3::4][0]+16,site_positions[:,1][3::4][0],site_positions[:,1][3::4][-1]),cmap=plt.cm.gray_r,clim=(0,0.5),interpolation='none')
ax_position.set_aspect(0.1)
ax_position.set_ylim(3840,0)
ax_position.set_xlim(70,0)
cleanAxes(ax_position)
ax_position.set_title('neuron position')
#time limits
if time_limits == None:
time_limits=[0,1e7]
all_spikes = data[clusterID]['times']
spike_times = all_spikes[np.where(all_spikes > time_limits[0])[0][0]:np.where(all_spikes < time_limits[1])[0][-1]]
#print len(all_spikes)
#print all_spikes[0]
#print all_spikes[-1]
##print np.where(all_spikes > time_limits[0])[0][0]
#print np.where(all_spikes < time_limits[1])[0][-1]
these_spikes = np.where(cluster_IDs==cluster_ID)[0][np.where(all_spikes > time_limits[0])[0][0]:np.where(all_spikes < time_limits[1])[0][-1]]
#spike_times = spike_times_data[these_spikes]
#for PC and CCG display, find nearby clusters and calculate their cross-correlograms
#PC plot
number_of_spikes_to_plot = 2000
these_templates=spike_templates[np.where(cluster_IDs==cluster_ID)[0]]
(included_templates,instances) = count_unique(these_templates)
this_template = included_templates[int(np.where(instances==np.max(instances))[0])]
ch1 = pc_ind_data[this_template][0]
ch2 = pc_ind_data[this_template][1]
ax_PCs.plot(pc_data[these_spikes][:number_of_spikes_to_plot,0,0],
pc_data[these_spikes][:number_of_spikes_to_plot,0,1],'bo',ms=1.5,markeredgewidth=0)
nearby_trio = [0,0,0];
nearby_euclids = [10000,10000,10000];
nearby_times=[]
for other_cluster in data.keys():
if other_cluster != clusterID:
if (np.abs(data[clusterID]['ypos']-data[other_cluster]['ypos']) + np.abs(data[clusterID]['xpos']-data[other_cluster]['xpos'])) < nearby_euclids[-1]:
rank = np.where((np.abs(data[clusterID]['ypos']-data[other_cluster]['ypos']) + np.abs(data[clusterID]['xpos']-data[other_cluster]['xpos'])) < nearby_euclids)[0][0]
nearby_euclids[rank] = (np.abs(data[clusterID]['ypos']-data[other_cluster]['ypos']) + np.abs(data[clusterID]['xpos']-data[other_cluster]['xpos']))
nearby_trio[rank]=other_cluster
print(nearby_trio)
for ii,neighbor in enumerate(nearby_trio):
all_spikes_neighbor = data[neighbor]['times']
indices_neighbor= np.where(cluster_IDs==int(neighbor))[0][np.where(all_spikes_neighbor > time_limits[0])[0][0]:np.where(all_spikes_neighbor < time_limits[1])[0][-1]]
neighbor_templates=spike_templates[indices_neighbor]
neighbor_spike_times = all_spikes_neighbor[np.where(all_spikes_neighbor > time_limits[0])[0][0]:np.where(all_spikes_neighbor < time_limits[1])[0][-1]]
nearby_times.append(neighbor_spike_times)
(included_templates,instances) = count_unique(neighbor_templates)
this_template = included_templates[int(np.where(instances==np.max(instances))[0])]
ch1_index = np.where(pc_ind_data[this_template] == ch1)[0]
ch2_index = np.where(pc_ind_data[this_template] == ch2)[0]
if ch2_index.size != 0 and ch1_index.size != 0:
ax_PCs.plot(pc_data[indices_neighbor][:number_of_spikes_to_plot,0,np.where(pc_ind_data[this_template] == ch1)[0][0]],
pc_data[indices_neighbor][:number_of_spikes_to_plot,0,np.where(pc_ind_data[this_template] == ch2)[0][0]],
'o',color=neighbor_colors[ii],ms=1,markeredgewidth=0,alpha=0.8)
all_diffs = []
for spike_time in spike_times:
try:
neighbor_start = np.where(neighbor_spike_times < spike_time - 0.5)[0][-1]
except:
neighbor_start = 0
try:
neighbor_end = np.where(neighbor_spike_times > spike_time + 0.5)[0][0]
except:
neighbor_end = -1
neighbor_chunk = neighbor_spike_times[neighbor_start:neighbor_end]
#print '\r'+str(spike_time)+' '+str(neighbor_start)+' - '+str(neighbor_end),
all_diffs.extend(neighbor_chunk - spike_time)
all_diffs=np.array(all_diffs).flatten()
hist,edges = np.histogram(all_diffs,bins=np.linspace(-0.2,0.2,400))
ax_CCGs[ii].plot(edges[:-1],hist,drawstyle='steps',color=neighbor_colors[ii])
ax_CCGs[ii].set_xlim(-.2,.2)
ax_CCGs[ii].xaxis.set_ticks([-0.075,0.0,0.075])
ax_CCGs[ii].axvline(0.0,ls='--',color='#ff8080')
cleanAxes(ax_CCGs[ii],total=True)
cleanAxes(ax_neighbor_waveforms,total=True)
cleanAxes(ax_PCs)
ax_PCs.set_title('PC features')
#waveform plot
cleanAxes(ax_waveform,total=True)
ax_waveform.set_title('waveform')
channel_offset = 0 # should not be 0 if not starting from tip.
peak_y_channel = np.where(data[clusterID]['waveform_weights'] == np.max(data[clusterID]['waveform_weights']))[0][0]
times = np.random.choice(spike_times,100,replace=False)
random_times = np.random.rand(100) * (np.max(spike_times)-np.min(spike_times)) + np.min(spike_times)
xoffs=[0,100,50,150,0,100,50,150,0,100,50,150,0,100,50,150,0,100][::-1]
yoff=1500
signal = 0;
for ii,channel in enumerate(np.linspace(peak_y_channel-8,peak_y_channel+10,18)):
ws = load_waveforms(datapath,channel,times,channels=channels)
ws_bkd = load_waveforms(datapath,channel,random_times,channels=channels)
x_range = np.linspace(xoffs[ii],xoffs[ii]+60,60)
for i in range(np.shape(ws)[0]):
if np.shape(x_range)[0] == np.shape(ws[i][:]-yoff*(ii/2))[0]:
ax_waveform.plot(x_range,ws[i][:]-yoff*(ii/2),alpha=0.05,color='#0066ff')
ax_waveform.plot(x_range,ws_bkd[i][:]-yoff*(ii/2),alpha=0.05,color='#c8c8c8')
if np.shape(x_range)[0] == np.shape(np.mean(ws,axis=0)-yoff*(ii/2))[0]:
if np.max(np.abs(np.mean(ws,axis=0)-yoff*(ii/2))) > signal:
signal = np.max(np.abs(np.mean(ws,axis=0)-yoff*(ii/2)))
noise = np.mean(ws_bkd) + np.std(ws_bkd)* 4.
ax_waveform.plot(x_range,np.mean(ws,axis=0)-yoff*(ii/2),color='#0066ff')
if ii > 3 and ii < 14:
for nn,axis in enumerate(nearby_trio):
if np.shape(x_range)[0] == np.shape(
|
np.mean(ws,axis=0)
|
numpy.mean
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Load the mini-ImageNet dataset from disk.
"""
import paddle
from paddle.io import Dataset
import numpy as np
import cv2
import os
import collections
import sys
sys.path.append("../..")
from paddle.vision import transforms as T
class MiniImageNet1(Dataset):
"""
Implementation of Mini-ImageNet dataset.
Args:
mode(str): 'train', 'test', or 'val' mode. Default 'test'.
transform: function to call to preprocess images.
Returns:
Dataset: Mini-Imagenet Dataset.
Examples:
.. code-block:: python
from miniimagenet import MiniImageNet1
test_dataset = MiniImageNet1(mode='test')
test_loader = paddle.io.DataLoader(test_dataset, batch_size=1, shuffle=False)
for id, data in enumerate(test_loader):
image = data[0]
label = data[1]
"""
def __init__(self,
mode='test',
transform=None):
self.transform = transform
# TODO: make it more general
parent_folder = '../dataset/mini-imagenet1/mini-imagenet'
supported_mode = ('train', 'val', 'test')
assert mode in supported_mode
subclass_folders = os.listdir(parent_folder)
self.image_2_label = []
# TODO: shrink this...
# 'train' mixes each class's train and val images together; 'val' and 'test'
# both read the class's test images. The split name is kept in a local
# variable so `mode` is not overwritten while iterating over class folders.
splits = ('train', 'val') if mode == 'train' else ('test',)
for i, folder in enumerate(subclass_folders):
for split in splits:
image_folder = os.path.join(parent_folder, folder, split)
image_filenames = os.listdir(image_folder)
for image_filename in image_filenames:
image_path = os.path.join(image_folder, image_filename)
self.image_2_label.append((image_path, i))
def __getitem__(self, idx):
image_filepath, label = self.image_2_label[idx]
reading_success = False
while not reading_success:
try:
image = cv2.imread(image_filepath)
if self.transform is not None:
# TODO: fix bug here.
transformed_image = self.transform(image)
else:
transformed_image = image
reading_success = True
except Exception as e:
print(e)
import pdb
pdb.set_trace()
return transformed_image, label
def __len__(self):
return len(self.image_2_label)
def main():
"""
Main for running a tutorial for ImageNet.
Returns:
None
"""
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
# Set the image transforms for training and evaluation
transform_train = T.Compose([T.Resize((224, 224)),
T.RandomHorizontalFlip(0.5),
T.RandomVerticalFlip(0.5),
T.Transpose(),
T.Normalize(
mean=[0, 0, 0],
std=[255, 255, 255]),
T.Normalize(mean, std, data_format='CHW')
])
transform_eval = T.Compose([T.Resize((224, 224)),
T.Transpose(),
T.Normalize(
mean=[0, 0, 0],
std=[255, 255, 255]),
T.Normalize(mean, std, data_format='CHW')
])
# Set the classification networks
model1 = paddle.vision.models.resnet50(pretrained=True)
model2 = paddle.vision.models.vgg16(pretrained=True)
model7 = paddle.vision.models.mobilenet_v1(pretrained=True)
train_set = MiniImageNet1(mode='train', transform=transform_train)
test_set = MiniImageNet1(mode='test', transform=transform_eval)
train_loader = paddle.io.DataLoader(train_set, batch_size=16, shuffle=True)
test_loader = paddle.io.DataLoader(test_set, batch_size=16, shuffle=True)
train_label_dict = collections.defaultdict(int)
test_label_dict = collections.defaultdict(int)
for index, data in enumerate(train_loader):
images = data[0]
labels = data[1]
for label in labels:
train_label_dict[label] += 1
predict1 = model1(images)
predict2 = model2(images)
predict7 = model7(images)
label1 = np.argmax(predict1, axis=1)
label2 =
|
np.argmax(predict2, axis=1)
|
numpy.argmax
|
'''
<NAME>
(+) added customized outputs: flow_fwd, flow_bwd, segmentation mask (src/tgt), instance mask (src/tgt)
(+) added recursive_check_nonzero_inst
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import numpy as np
from imageio import imread
from path import Path
import random
import math
from matplotlib import pyplot as plt
from flow_io import flow_read
from rigid_warp import flow_warp
import pdb
def load_as_float(path):
return imread(path).astype(np.float32)
def load_flo_as_float(path):
out = np.array(flow_read(path)).astype(np.float32)
return out
def load_seg_as_float(path):
out = np.load(path).astype(np.float32)
return out
def L2_norm(x, dim=1, keepdim=True):
curr_offset = 1e-10
l2_norm = torch.norm(torch.abs(x) + curr_offset, dim=dim, keepdim=keepdim)
return l2_norm
def find_noc_masks(fwd_flow, bwd_flow):
'''
fwd_flow: torch.size([1, 2, 256, 832])
bwd_flow: torch.size([1, 2, 256, 832])
output: torch.size([1, 1, 256, 832]), torch.size([1, 1, 256, 832])
input shape of flow_warp(): torch.size([bs, 2, 256, 832])
'''
bwd2fwd_flow, _ = flow_warp(bwd_flow, fwd_flow)
fwd2bwd_flow, _ = flow_warp(fwd_flow, bwd_flow)
fwd_flow_diff = torch.abs(bwd2fwd_flow + fwd_flow)
bwd_flow_diff = torch.abs(fwd2bwd_flow + bwd_flow)
fwd_consist_bound = torch.max(0.05 * L2_norm(fwd_flow), torch.Tensor([3.0]))
bwd_consist_bound = torch.max(0.05 * L2_norm(bwd_flow), torch.Tensor([3.0]))
noc_mask_0 = (L2_norm(fwd_flow_diff) < fwd_consist_bound).type(torch.FloatTensor) # noc_mask_tgt, torch.Size([1, 1, 256, 832]), torch.float32
noc_mask_1 = (L2_norm(bwd_flow_diff) < bwd_consist_bound).type(torch.FloatTensor) # noc_mask_src, torch.Size([1, 1, 256, 832]), torch.float32
# pdb.set_trace()
return noc_mask_0, noc_mask_1
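# Minimal sanity sketch for find_noc_masks(): with zero forward and backward
# flow the two fields are perfectly consistent, so both non-occlusion masks are
# expected to be all ones. Real use passes flows loaded with load_flo_as_float
# plus a batch dimension, as in __getitem__ below.
def _example_find_noc_masks():
    fwd = torch.zeros(1, 2, 256, 832)
    bwd = torch.zeros(1, 2, 256, 832)
    noc_tgt, noc_src = find_noc_masks(fwd, bwd)
    return noc_tgt.mean().item(), noc_src.mean().item()  # expected: (1.0, 1.0)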
def inst_iou(seg_src, seg_tgt, valid_mask):
'''
=> Which channel (instance) of seg_tgt does each instance of seg_src match?
seg_src: torch.Size([1, n_inst, 256, 832])
seg_tgt: torch.Size([1, n_inst, 256, 832])
valid_mask: torch.Size([1, 1, 256, 832])
'''
n_inst_src = seg_src.shape[1]
n_inst_tgt = seg_tgt.shape[1]
seg_src_m = seg_src * valid_mask.repeat(1,n_inst_src,1,1)
seg_tgt_m = seg_tgt * valid_mask.repeat(1,n_inst_tgt,1,1)
# pdb.set_trace()
'''
plt.figure(1), plt.imshow(seg_src.sum(dim=0).sum(dim=0)), plt.colorbar(), plt.ion(), plt.show()
plt.figure(2), plt.imshow(seg_tgt.sum(dim=0).sum(dim=0)), plt.colorbar(), plt.ion(), plt.show()
plt.figure(3), plt.imshow(valid_mask[0,0]), plt.colorbar(), plt.ion(), plt.show()
plt.figure(4), plt.imshow(seg_src_m.sum(dim=0).sum(dim=0)), plt.colorbar(), plt.ion(), plt.show()
'''
for i in range(n_inst_src):
if i == 0:
match_table = torch.from_numpy(np.zeros([1,n_inst_tgt]).astype(np.float32))
continue;
overl = (seg_src_m[:,i].unsqueeze(1).repeat(1,n_inst_tgt,1,1) * seg_tgt_m).clamp(min=0,max=1).squeeze(0).sum(1).sum(1)
union = (seg_src_m[:,i].unsqueeze(1).repeat(1,n_inst_tgt,1,1) + seg_tgt_m).clamp(min=0,max=1).squeeze(0).sum(1).sum(1)
iou_inst = overl / union
match_table = torch.cat((match_table, iou_inst.unsqueeze(0)), dim=0)
iou, inst_idx = torch.max(match_table,dim=1)
# pdb.set_trace()
return iou, inst_idx
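# Minimal sanity sketch for inst_iou(): matching a one-instance segmentation
# against itself under a fully valid mask. Channel 0 is skipped inside
# inst_iou (it is reserved for background / the instance count downstream),
# so the interesting entries are at index 1.
def _example_inst_iou():
    seg = torch.zeros(1, 2, 256, 832)
    seg[0, 1, 100:150, 300:400] = 1.                 # one rectangular instance
    valid = torch.ones(1, 1, 256, 832)
    iou, idx = inst_iou(seg, seg, valid_mask=valid)
    return iou[1].item(), idx[1].item()              # expected: (1.0, 1)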
def recursive_check_nonzero_inst(tgt_inst, ref_inst):
assert( tgt_inst[0].mean() == ref_inst[0].mean() )
n_inst = int(tgt_inst[0].mean())
for nn in range(n_inst):
if tgt_inst[nn+1].mean() == 0:
tgt_inst[0] -= 1
ref_inst[0] -= 1
if nn+1 == n_inst:
tgt_inst[nn+1:] = 0
ref_inst[nn+1:] = 0
else:
tgt_inst[nn+1:] = torch.cat([tgt_inst[nn+2:], torch.zeros(1, tgt_inst.size(1), tgt_inst.size(2))], dim=0) # re-ordering
ref_inst[nn+1:] = torch.cat([ref_inst[nn+2:], torch.zeros(1, ref_inst.size(1), ref_inst.size(2))], dim=0) # re-ordering
return recursive_check_nonzero_inst(tgt_inst, ref_inst)
if ref_inst[nn+1].mean() == 0:
tgt_inst[0] -= 1
ref_inst[0] -= 1
if nn+1 == n_inst:
tgt_inst[nn+1:] = 0
ref_inst[nn+1:] = 0
else:
tgt_inst[nn+1:] = torch.cat([tgt_inst[nn+2:], torch.zeros(1, tgt_inst.size(1), tgt_inst.size(2))], dim=0) # re-ordering
ref_inst[nn+1:] = torch.cat([ref_inst[nn+2:], torch.zeros(1, ref_inst.size(1), ref_inst.size(2))], dim=0) # re-ordering
return recursive_check_nonzero_inst(tgt_inst, ref_inst)
return tgt_inst, ref_inst
class SequenceFolder(data.Dataset):
"""
A sequence data loader where the files are arranged in this way:
root/scene_1/0000000.jpg
root/scene_1/0000001.jpg
..
root/scene_1/cam.txt
root/scene_2/0000000.jpg
.
transform functions must take in a list of images and a numpy array (usually the intrinsics matrix)
"""
def __init__(self, root, train, seed=None, shuffle=True, max_num_instances=20, sequence_length=3, transform=None, proportion=1, begin_idx=None):
np.random.seed(seed)
random.seed(seed)
self.root = Path(root)
scene_list_path = self.root/'train.txt' if train else self.root/'val.txt'
self.scenes = [self.root/'image'/folder[:-1] for folder in open(scene_list_path)]
self.is_shuffle = shuffle
self.crawl_folders(sequence_length)
self.mni = max_num_instances
self.transform = transform
split_index = int(math.floor(len(self.samples)*proportion))
self.samples = self.samples[:split_index]
if begin_idx:
self.samples = self.samples[begin_idx:]
# pdb.set_trace()
def crawl_folders(self, sequence_length):
sequence_set = []
demi_length = (sequence_length-1)//2
shifts = list(range(-demi_length, demi_length + 1))
shifts.pop(demi_length)
for scene in self.scenes:
sceneff = Path(Path.dirname(scene).parent+'/flow_f/'+scene.split('/')[-1])
scenefb = Path(Path.dirname(scene).parent+'/flow_b/'+scene.split('/')[-1])
scenei = Path(Path.dirname(scene).parent+'/segmentation/'+scene.split('/')[-1])
intrinsics = np.genfromtxt(scene/'cam.txt').astype(np.float32).reshape((3, 3))
imgs = sorted(scene.files('*.jpg'))
flof = sorted(sceneff.files('*.flo')) # 00: src, 01: tgt
flob = sorted(scenefb.files('*.flo')) # 00: tgt, 01: src
segm = sorted(scenei.files('*.npy'))
if len(imgs) < sequence_length:
continue
for i in range(demi_length, len(imgs)-demi_length):
sample = {'intrinsics': intrinsics, 'tgt': imgs[i], 'ref_imgs': [],
'flow_fs':[], 'flow_bs':[], 'tgt_seg':segm[i], 'ref_segs':[]} # ('tgt_insts':[], 'ref_insts':[]) will be processed when getitem() is called
for j in shifts:
sample['ref_imgs'].append(imgs[i+j])
sample['ref_segs'].append(segm[i+j])
for j in range(-demi_length, 1):
sample['flow_fs'].append(flof[i+j])
sample['flow_bs'].append(flob[i+j])
sequence_set.append(sample)
# pdb.set_trace()
if self.is_shuffle:
random.shuffle(sequence_set)
self.samples = sequence_set
def __getitem__(self, index):
sample = self.samples[index]
tgt_img = load_as_float(sample['tgt'])
ref_imgs = [load_as_float(ref_img) for ref_img in sample['ref_imgs']]
flow_fs = [torch.from_numpy(load_flo_as_float(flow_f)) for flow_f in sample['flow_fs']]
flow_bs = [torch.from_numpy(load_flo_as_float(flow_b)) for flow_b in sample['flow_bs']]
tgt_seg = torch.from_numpy(load_seg_as_float(sample['tgt_seg']))
ref_segs = [torch.from_numpy(load_seg_as_float(ref_seg)) for ref_seg in sample['ref_segs']]
tgt_sort = torch.cat([torch.zeros(1).long(), tgt_seg.sum(dim=(1,2)).argsort(descending=True)[:-1]], dim=0)
ref_sorts = [ torch.cat([torch.zeros(1).long(), ref_seg.sum(dim=(1,2)).argsort(descending=True)[:-1]], dim=0) for ref_seg in ref_segs ]
tgt_seg = tgt_seg[tgt_sort]
ref_segs = [ref_seg[ref_sort] for ref_seg, ref_sort in zip(ref_segs, ref_sorts)]
tgt_insts = []
ref_insts = []
for i in range( len(ref_imgs) ):
noc_f, noc_b = find_noc_masks(flow_fs[i].unsqueeze(0), flow_bs[i].unsqueeze(0))
if i < len(ref_imgs)/2: # first half
seg0 = ref_segs[i].unsqueeze(0)
seg1 = tgt_seg.unsqueeze(0)
else: # second half
seg0 = tgt_seg.unsqueeze(0)
seg1 = ref_segs[i].unsqueeze(0)
seg0w, _ = flow_warp(seg1, flow_fs[i].unsqueeze(0))
seg1w, _ = flow_warp(seg0, flow_bs[i].unsqueeze(0))
n_inst0 = seg0.shape[1]
n_inst1 = seg1.shape[1]
### Warp seg0 to seg1. Find IoU between seg1w and seg1. Find the maximum corresponded instance in seg1.
iou_01, ch_01 = inst_iou(seg1w, seg1, valid_mask=noc_b)
iou_10, ch_10 = inst_iou(seg0w, seg0, valid_mask=noc_f)
seg0_re = torch.zeros(self.mni+1, seg0.shape[2], seg0.shape[3])
seg1_re = torch.zeros(self.mni+1, seg1.shape[2], seg1.shape[3])
non_overlap_0 = torch.ones([seg0.shape[2], seg0.shape[3]])
non_overlap_1 = torch.ones([seg0.shape[2], seg0.shape[3]])
num_match = 0
for ch in range(n_inst0):
condition1 = (ch == ch_10[ch_01[ch]]) and (iou_01[ch] > 0.5) and (iou_10[ch_01[ch]] > 0.5)
condition2 = ((seg0[0,ch] * non_overlap_0).max() > 0) and ((seg1[0,ch_01[ch]] * non_overlap_1).max() > 0)
if condition1 and condition2 and (num_match < self.mni): # matching success!
num_match += 1
seg0_re[num_match] = seg0[0,ch] * non_overlap_0
seg1_re[num_match] = seg1[0,ch_01[ch]] * non_overlap_1
non_overlap_0 = non_overlap_0 * (1 - seg0_re[num_match])
non_overlap_1 = non_overlap_1 * (1 - seg1_re[num_match])
seg0_re[0] = num_match
seg1_re[0] = num_match
# pdb.set_trace()
if seg0_re[0].mean() != 0 and seg0_re[int(seg0_re[0].mean())].mean() == 0: pdb.set_trace()
if seg1_re[0].mean() != 0 and seg1_re[int(seg1_re[0].mean())].mean() == 0: pdb.set_trace()
if i < len(ref_imgs)/2: # first half
tgt_insts.append(seg1_re.detach().cpu().numpy().transpose(1,2,0))
ref_insts.append(seg0_re.detach().cpu().numpy().transpose(1,2,0))
else: # second half
tgt_insts.append(seg0_re.detach().cpu().numpy().transpose(1,2,0))
ref_insts.append(seg1_re.detach().cpu().numpy().transpose(1,2,0))
# pdb.set_trace()
'''
plt.close('all')
plt.figure(1), plt.imshow(tgt_insts[0].sum(dim=0)), plt.grid(linestyle=':', linewidth=0.4), plt.colorbar(), plt.ion(), plt.show()
plt.figure(2), plt.imshow(tgt_insts[1].sum(dim=0)), plt.grid(linestyle=':', linewidth=0.4), plt.colorbar(), plt.ion(), plt.show()
plt.figure(3), plt.imshow(ref_insts[0].sum(dim=0)), plt.grid(linestyle=':', linewidth=0.4), plt.colorbar(), plt.ion(), plt.show()
plt.figure(4), plt.imshow(ref_insts[1].sum(dim=0)), plt.grid(linestyle=':', linewidth=0.4), plt.colorbar(), plt.ion(), plt.show()
'''
if self.transform is not None:
imgs, segms, intrinsics = self.transform([tgt_img] + ref_imgs, tgt_insts + ref_insts, np.copy(sample['intrinsics']))
tgt_img = imgs[0]
ref_imgs = imgs[1:]
tgt_insts = segms[:int(len(ref_imgs)/2+1)]
ref_insts = segms[int(len(ref_imgs)/2+1):]
else:
intrinsics = np.copy(sample['intrinsics'])
        ### While passing through RandomScaleCrop(), instances can fly out of the frame and become zero masks. -> Need filtering!
for sq in range( len(ref_imgs) ):
tgt_insts[sq], ref_insts[sq] = recursive_check_nonzero_inst(tgt_insts[sq], ref_insts[sq])
if tgt_insts[0][0].mean() != 0 and tgt_insts[0][int(tgt_insts[0][0].mean())].mean() == 0: pdb.set_trace()
if tgt_insts[1][0].mean() != 0 and tgt_insts[1][int(tgt_insts[1][0].mean())].mean() == 0: pdb.set_trace()
if ref_insts[0][0].mean() != 0 and ref_insts[0][int(ref_insts[0][0].mean())].mean() == 0: pdb.set_trace()
if ref_insts[1][0].mean() != 0 and ref_insts[1][int(ref_insts[1][0].mean())].mean() == 0: pdb.set_trace()
if tgt_insts[0][0].mean() != tgt_insts[0][1:].mean(-1).mean(-1).nonzero().size(0): pdb.set_trace()
if tgt_insts[1][0].mean() != tgt_insts[1][1:].mean(-1).mean(-1).nonzero().size(0): pdb.set_trace()
if ref_insts[0][0].mean() != ref_insts[0][1:].mean(-1).mean(-1).nonzero().size(0): pdb.set_trace()
if ref_insts[1][0].mean() != ref_insts[1][1:].mean(-1).mean(-1).nonzero().size(0): pdb.set_trace()
# pdb.set_trace()
return tgt_img, ref_imgs, intrinsics,
|
np.linalg.inv(intrinsics)
|
numpy.linalg.inv
|
import pytest
import numpy as np
from sosfilt import _zpk_funcs
class TestCplxReal:
def test_trivial_input(self):
np.testing.assert_equal(
_zpk_funcs._cplxreal(np.array([], dtype=np.complex_)), ([], [])
)
np.testing.assert_equal(
_zpk_funcs._cplxreal(np.array([1], dtype=np.complex_)), ([], [1])
)
def test_output_order(self):
zc, zr = _zpk_funcs._cplxreal(np.roots(np.array([1, 0, 0, 1])))
np.testing.assert_allclose(
np.append(zc, zr), [1 / 2 + 1j * np.sin(np.pi / 3), -1]
)
eps = np.spacing(1)
a = [
0 + 1j,
0 - 1j,
eps + 1j,
eps - 1j,
-eps + 1j,
-eps - 1j,
1,
4,
2,
3,
0,
0,
2 + 3j,
2 - 3j,
1 - eps + 1j,
1 + 2j,
1 - 2j,
1 + eps - 1j, # sorts out of order
3 + 1j,
3 + 1j,
3 + 1j,
3 - 1j,
3 - 1j,
3 - 1j,
2 - 3j,
2 + 3j,
]
zc, zr = _zpk_funcs._cplxreal(np.array(a))
np.testing.assert_allclose(
zc, [1j, 1j, 1j, 1 + 1j, 1 + 2j, 2 + 3j, 2 + 3j, 3 + 1j, 3 + 1j, 3 + 1j]
)
np.testing.assert_allclose(zr, [0, 0, 1, 2, 3, 4])
z = np.array(
[
1 - eps + 1j,
1 + 2j,
1 - 2j,
1 + eps - 1j,
1 + eps + 3j,
1 - 2 * eps - 3j,
0 + 1j,
0 - 1j,
2 + 4j,
2 - 4j,
2 + 3j,
2 - 3j,
3 + 7j,
3 - 7j,
4 - eps + 1j,
4 + eps - 2j,
4 - 1j,
4 - eps + 2j,
]
)
zc, zr = _zpk_funcs._cplxreal(z)
np.testing.assert_allclose(
zc, [1j, 1 + 1j, 1 + 2j, 1 + 3j, 2 + 3j, 2 + 4j, 3 + 7j, 4 + 1j, 4 + 2j]
)
np.testing.assert_equal(zr, [])
def test_unmatched_conjugates(self):
with pytest.raises(ValueError):
_zpk_funcs._cplxreal(np.array([1 + 3j, 1 - 3j, 1 + 2j]))
with pytest.raises(ValueError):
_zpk_funcs._cplxreal(np.array([1 + 3j]))
with pytest.raises(ValueError):
_zpk_funcs._cplxreal(np.array([1 - 3j]))
class TestZpk2Sos:
def sos2zpk(self, z, p, k):
return _zpk_funcs.zpk2sos_multiple(
z[:, None], p[:, None], np.array([k], dtype=np.float64)
)[0, ...]
def test_basic(self):
z = np.array([-1.0, -1.0], dtype=np.complex_)
p = np.array([0.57149 + 0.29360j, 0.57149 - 0.29360j])
k = 1
sos = self.sos2zpk(z, p, k)
sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]]
|
np.testing.assert_array_almost_equal(sos, sos2, decimal=4)
|
numpy.testing.assert_array_almost_equal
|
# -*- coding: utf-8 -*-
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
from tsam.utils.durationRepresentation import durationRepresentation
def representations(
candidates,
clusterOrder,
default,
representationMethod=None,
representationDict=None,
distributionPeriodWise=True,
timeStepsPerPeriod=None,
):
clusterCenterIndices = None
if representationMethod is None:
representationMethod = default
if representationMethod == "meanRepresentation":
clusterCenters = meanRepresentation(candidates, clusterOrder)
elif representationMethod == "medoidRepresentation":
clusterCenters, clusterCenterIndices = medoidRepresentation(
candidates, clusterOrder
)
elif representationMethod == "maxoidRepresentation":
clusterCenters, clusterCenterIndices = maxoidRepresentation(
candidates, clusterOrder
)
elif representationMethod == "minmaxmeanRepresentation":
clusterCenters = minmaxmeanRepresentation(
candidates, clusterOrder, representationDict, timeStepsPerPeriod
)
elif representationMethod == "durationRepresentation" or representationMethod == "distributionRepresentation":
clusterCenters = durationRepresentation(
candidates, clusterOrder, distributionPeriodWise, timeStepsPerPeriod, representMinMax=False,
)
elif representationMethod == "distributionAndMinMaxRepresentation":
clusterCenters = durationRepresentation(
candidates, clusterOrder, distributionPeriodWise, timeStepsPerPeriod, representMinMax=True,
)
else:
raise ValueError("Chosen 'representationMethod' does not exist.")
return clusterCenters, clusterCenterIndices
def maxoidRepresentation(candidates, clusterOrder):
"""
Represents the candidates of a given cluster group (clusterOrder)
    by its maxoid, i.e. the cluster member farthest from all candidates, measured with the euclidean distance.
:param candidates: Dissimilarity matrix where each row represents a candidate. required
:type candidates: np.ndarray
:param clusterOrder: Integer array where the index refers to the candidate and the
Integer entry to the group. required
:type clusterOrder: np.array
"""
# set cluster member that is farthest away from the points of the other clusters as maxoid
clusterCenters = []
clusterCenterIndices = []
for clusterNum in np.unique(clusterOrder):
indice = np.where(clusterOrder == clusterNum)
innerDistMatrix = euclidean_distances(candidates, candidates[indice])
        maxdistIdx = np.argmax(innerDistMatrix.sum(axis=0))
        clusterCenters.append(candidates[indice][maxdistIdx])
        clusterCenterIndices.append(indice[0][maxdistIdx])
return clusterCenters, clusterCenterIndices
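# Hedged usage sketch (added for illustration; the toy candidates and cluster
# assignment below are made up, not project data): for two well separated groups,
# each group's maxoid is the member with the largest summed distance to all candidates.
def _example_maxoid():
    candidates = np.array([[0.0], [0.1], [5.0], [5.2]])
    clusterOrder = np.array([0, 0, 1, 1])
    return maxoidRepresentation(candidates, clusterOrder)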
def medoidRepresentation(candidates, clusterOrder):
"""
Represents the candidates of a given cluster group (clusterOrder)
by its medoid, measured with the euclidean distance.
:param candidates: Dissimilarity matrix where each row represents a candidate. required
:type candidates: np.ndarray
:param clusterOrder: Integer array where the index refers to the candidate and the
Integer entry to the group. required
:type clusterOrder: np.array
"""
# set cluster center as medoid
clusterCenters = []
clusterCenterIndices = []
for clusterNum in np.unique(clusterOrder):
indice = np.where(clusterOrder == clusterNum)
innerDistMatrix = euclidean_distances(candidates[indice])
mindistIdx = np.argmin(innerDistMatrix.sum(axis=0))
clusterCenters.append(candidates[indice][mindistIdx])
clusterCenterIndices.append(indice[0][mindistIdx])
return clusterCenters, clusterCenterIndices
def meanRepresentation(candidates, clusterOrder):
"""
Represents the candidates of a given cluster group (clusterOrder)
by its mean.
:param candidates: Dissimilarity matrix where each row represents a candidate. required
:type candidates: np.ndarray
:param clusterOrder: Integer array where the index refers to the candidate and the
Integer entry to the group. required
:type clusterOrder: np.array
"""
# set cluster centers as means of the group candidates
clusterCenters = []
for clusterNum in
|
np.unique(clusterOrder)
|
numpy.unique
|
#!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
# Brief: rendering views from a given map at arbitrary 3d locations.
from typing import Dict
import os
import math
import numpy as np
from map_renderer import Mesh
import OpenGL.GL as gl
from map_renderer.glow import GlBuffer, GlProgram, GlShader, GlFramebuffer, GlRenderbuffer, GlTexture2D, vec4, vec3
import map_renderer.glow as glow
glow.WARN_INVALID_UNIFORMS = True
def glPerspective(fov, aspect, z_near, z_far):
""" generate perspective matrix.
For more details see https://www.opengl.org/sdk/docs/man2/xhtml/gluPerspective.xml
"""
M = np.zeros((4, 4), dtype=np.float32)
# Copied from gluPerspective
f = 1.0 / math.tan(0.5 * fov)
M[0, 0] = f / aspect
M[1, 1] = f
M[2, 2] = (z_near + z_far) / (z_near - z_far)
M[2, 3] = (2.0 * z_far * z_near) / (z_near - z_far)
M[3, 2] = -1.0
return M
def normalize(vec: np.array):
""" normalize. """
length = math.sqrt(np.dot(vec, vec))
if abs(length) < 0.0000001: return vec
return vec / length
def lookAt(x_cam, y_cam, z_cam, x_ref, y_ref, z_ref):
""" generate view matrix. """
# determine rotation from current location:
pos_cam = vec3(x_cam, y_cam, z_cam)
pos = vec3(x_ref, y_ref, z_ref)
up = vec3(0.0, 1.0, 0.0)
f = normalize(pos - pos_cam)
x_axis = normalize(np.cross(f, up))
y_axis = normalize(np.cross(x_axis, f))
z_axis = -f
view_matrix = np.zeros((4, 4), dtype=np.float32)
view_matrix[0, :3] = x_axis
view_matrix[1, :3] = y_axis
view_matrix[2, :3] = z_axis
view_matrix[3, 3] = 1.0
# effectively => R * T
view_matrix[0, 3] = np.dot(-pos_cam, x_axis)
view_matrix[1, 3] = np.dot(-pos_cam, y_axis)
view_matrix[2, 3] = np.dot(-pos_cam, z_axis)
return view_matrix
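# Hedged usage sketch (added for illustration only; the camera pose, aspect ratio and
# clip planes below are arbitrary): a typical caller combines glPerspective() and
# lookAt() into a single world-to-clip transform.
def _example_view_projection():
    proj = glPerspective(fov=math.radians(90.0), aspect=640.0 / 480.0,
                         z_near=0.1, z_far=50.0)
    view = lookAt(x_cam=0.0, y_cam=2.0, z_cam=5.0, x_ref=0.0, y_ref=0.0, z_ref=0.0)
    return proj @ view  # 4x4 matrix mapping world coordinates to clip space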
class MapRenderer:
""" rendering views from a given map at arbitrary 3d locations. """
def __init__(self, params: Dict):
"""
Args:
            params[Dict]: the parameters used for rendering: width, height, fov_up, fov_down, min_range, max_range
"""
self._width = int(params["width"])
self._height = int(params["height"])
self._mesh = None
current_directory = os.path.abspath(os.path.join(os.path.dirname(__file__)))
self._render_program = GlProgram()
self._render_program.attach(
GlShader.from_file(gl.GL_VERTEX_SHADER, os.path.join(current_directory, "shader/render_mesh.vert")))
self._render_program.attach(
GlShader.from_file(gl.GL_GEOMETRY_SHADER, os.path.join(current_directory, "shader/render_mesh.geom")))
self._render_program.attach(
GlShader.from_file(gl.GL_FRAGMENT_SHADER, os.path.join(current_directory, "shader/render_mesh.frag")))
self._render_program.link()
self._render_program.bind()
self._render_program["fov_up"] = float(params["fov_up"])
self._render_program["fov_down"] = float(params["fov_down"])
self._render_program["min_depth"] = float(params["min_range"])
self._render_program["max_depth"] = float(params["max_range"])
self._render_program.release()
self._draw_program = GlProgram()
self._draw_program.attach(
GlShader.from_file(gl.GL_VERTEX_SHADER, os.path.join(current_directory, "shader/draw_mesh.vert")))
self._draw_program.attach(
GlShader.from_file(gl.GL_FRAGMENT_SHADER, os.path.join(current_directory, "shader/draw_mesh.frag")))
self._draw_program.link()
self._draw_program.bind()
def set_uniform(program: GlProgram, name: str, value):
loc = gl.glGetUniformLocation(program.id, name)
if isinstance(value, float):
gl.glUniform1f(loc, value)
elif isinstance(value, np.ndarray):
if value.shape[0] == 4:
gl.glUniform4fv(loc, 1, value)
elif value.shape[0] == 3:
gl.glUniform3fv(loc, 1, value)
else:
raise NotImplementedError("implement.")
set_uniform(self._draw_program, "lights[0].position", vec4(0, 0, -1, 0))
set_uniform(self._draw_program, "lights[0].ambient", vec3(.01, .01, .01))
set_uniform(self._draw_program, "lights[0].diffuse", vec3(.9, .9, .9))
set_uniform(self._draw_program, "lights[0].specular", vec3(.9, .9, .9))
# more evenly distributed sun light...
dirs = [vec4(1, -1, 1, 0), vec4(-1, -1, 1, 0), vec4(1, -1, -1, 0), vec4(-1, -1, -1, 0)]
indirect_intensity = vec3(.1, .1, .1)
for i, direction in enumerate(dirs):
light_name = "lights[{}]".format(i + 1)
set_uniform(self._draw_program, light_name + ".position", direction)
set_uniform(self._draw_program, light_name + ".ambient", vec3(.01, .01, .01))
set_uniform(self._draw_program, light_name + ".diffuse", indirect_intensity)
set_uniform(self._draw_program, light_name + ".specular", indirect_intensity)
set_uniform(self._draw_program, "material.ambient", vec3(0.9, 0.9, 0.9))
set_uniform(self._draw_program, "material.diffuse", vec3(0.0, 0.0, 0.9))
set_uniform(self._draw_program, "material.specular", vec3(0.0, 0.0, 0.0))
set_uniform(self._draw_program, "material.emission", vec3(0.0, 0.0, 0.0))
set_uniform(self._draw_program, "material.shininess", 1.0)
set_uniform(self._draw_program, "material.alpha", 1.0)
self._draw_program.release()
self._draw_program["num_lights"] = 5
self._draw_program["model_mat"] = np.identity(4, dtype=np.float32)
self._draw_program["normal_mat"] =
|
np.identity(4, dtype=np.float32)
|
numpy.identity
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 12:44:30 2020
@author: niklas
"""
import os
import imageio
import tifffile
from pathlib import Path
import numpy as np
from bridson import poisson_disc_samples
def get_affinities(labels):
shape = labels.shape
# we use three channels, to save as rgb
affinities = np.zeros((shape[0] - 1, shape[1] - 1, 3))
top_affinity = (labels[:-1, :-1] - labels[1:, :-1]) == 0
right_affinity = (labels[:-1, :-1] - labels[:-1, 1:]) == 0
affinities[..., 0] = top_affinity
affinities[..., 1] = right_affinity
return affinities
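# Minimal sketch (added for illustration; the 2x2 label image is made up): pixels in
# the same column share a label, so the vertical affinity channel is 1 while the
# horizontal channel is 0; the third channel only pads the map to RGB.
def _example_affinities():
    labels = np.array([[1, 2],
                       [1, 2]], dtype=np.uint16)
    return get_affinities(labels)  # shape (1, 1, 3) -> [[[1., 0., 0.]]]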
def toy_circle_labels_affinities(
width, height, radius,
intensity_prob,
noise_func
):
# affinity maps have -1 extents, so we build images with +1 extents
height = height + 1
width = width + 1
labels = np.zeros((height, width), dtype=np.uint16)
sketch = np.zeros_like(labels, dtype=float)
# r is the distance between two center points, so we need to multiply by 2
centers = poisson_disc_samples(width=width, height=height, r=radius * 2)
ys = np.arange(height)
xs = np.arange(width)
meshy, meshx = np.meshgrid(ys, xs, indexing="ij")
for i, (x, y) in enumerate(centers):
dist = (meshx - x)**2 + (meshy - y)**2
tmp_radius = np.random.uniform(radius / 2, radius)
mask = dist < tmp_radius**2
# enumerate starts at 0, but 0 is background
labels[mask] = i + 1
tmp_intensity = np.random.uniform(*intensity_prob)
sketch[mask] = (
tmp_radius - np.sqrt(dist[mask])
) / tmp_radius * tmp_intensity
noise = noise_func(sketch)
affinities = get_affinities(labels)
return labels[:-1, :-1], sketch[:-1, :-1], noise[:-1, :-1], affinities
def gaussian_noise(sigma):
def func(sketch):
return sketch + np.random.normal(0, sigma, size=sketch.shape)
return func
width = 600
height = 600
radius = 30
# labels, sketch, noise, affinities = toy_circle_labels_affinities(
# width, height, radius,
# [1, 2],
# gaussian_noise(1.5 / 16)
# )
# affinity_top = (labels[:-1, :] - labels[1:, :]) == 0
# affinity_right = (labels[:, :-1] - labels[:, 1:]) == 0
path = r"D:\pytorch-CycleGAN-and-pix2pix\datasets"
newpath = os.path.join(path, "disk-affinity")
Path(newpath).mkdir(parents=True, exist_ok=True)
sets = ["val", "train", "test"]
num = 100
for s in sets:
paths = []
for b in ["A", "B", "C"]:
pathb = os.path.join(newpath, s+b)
Path(pathb).mkdir(parents=True, exist_ok=True)
paths.append(pathb)
for i in range(num):
labels, sketch, noise, affinities = toy_circle_labels_affinities(
width, height, radius,
[1, 2],
gaussian_noise(1.5 / 16)
)
# we need to save noise as RGB, too, since cycleGAN needs
        # input and output channel counts to match
noise =
|
np.repeat(noise[..., np.newaxis], 3, axis=2)
|
numpy.repeat
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import datetime
import os
import sys
import warnings
# parse args
parser = argparse.ArgumentParser(
description='This is the main mnist and fashion mnist experiment.')
parser.add_argument(
'--outfile',
type=str,
help='json file to dump results to; will terminate if file already exists; '
'required',
required=True)
parser.add_argument(
'--dataset',
choices=['mnist', 'fashion_mnist'],
help='dataset to use in experiments; '
'required',
required=True)
parser.add_argument(
'--fold-count',
type=int,
help='number of folds contained in masks; '
'required',
required=True)
parser.add_argument(
'--train-folds',
type=str,
help='colon separated set of folds to use for training; '
'can also specify the training folds to use in each phase using double colons; '
'required',
required=True)
parser.add_argument(
'--prevent-repeats',
action='store_true',
help='prevent the same sample appearing multiple times in the same phase')
parser.add_argument(
'--test-folds',
type=str,
help='colon separated set of folds to use for testing')
parser.add_argument(
'--phases',
type=str,
help='colon separated sets of digits for different phases; '
'required',
required=True)
parser.add_argument(
'--test-on-all-digits',
action='store_true',
help='also test on all the digits appearing in any phase; '
'requires test folds to be specified')
parser.add_argument(
'--log-frequency',
type=int,
help='number of examples to learn on between recording accuracies and predictions; '
'required',
required=True)
parser.add_argument(
'--init-seed',
type=int,
help='seed for network initialization; '
'required',
required=True)
parser.add_argument(
'--shuffle-seed',
type=int,
help='seed for the random number generator used to shuffle datasets')
parser.add_argument(
'--criteria',
choices=['steps', 'offline', 'online'],
help='type of criteria to use when deciding whether or not to move to the next phase')
parser.add_argument(
'--steps',
type=int,
help='number of steps in each phase; '
'required for steps criteria only',
default=None)
parser.add_argument(
'--required-accuracy',
type=float,
help='required classifier accuracy to move onto next phase; '
'required for offline and online criteria only',
default=None)
parser.add_argument(
'--tolerance',
type=int,
help='maximum number of steps to try and satisfy required accuracy in a single phase; '
'required for offline and online criteria only',
default=None)
parser.add_argument(
'--validation-folds',
type=str,
help='colon separated set of folds to use for determining when accuracy criteria is met; '
'required for offline criteria only',
default=None)
parser.add_argument(
'--minimum-steps',
type=int,
help='minimum number of steps before moving onto next phase; '
'required for online criteria only',
default=None)
parser.add_argument(
'--hold-steps',
type=int,
help='minimum number of steps to hold accuracy for before moving onto next stage; '
'required for online criteria only',
default=None)
parser.add_argument(
'--optimizer',
choices=['sgd', 'adam', 'rms'],
help='optimization algorithm to use to train the network; '
'required',
required=True)
parser.add_argument(
'--lr',
type=float,
help='learning rate for training; '
         'required by sgd, adam, and rms optimizer only',
default=None)
parser.add_argument(
'--momentum',
type=float,
help='momentum hyperparameter; '
         'required by sgd optimizer only',
default=None)
parser.add_argument(
'--beta-1',
type=float,
help='beta 1 hyperparameter; '
         'required by adam optimizer only',
default=None)
parser.add_argument(
'--beta-2',
type=float,
help='beta 2 hyperparameter; '
         'required by adam optimizer only',
default=None)
parser.add_argument(
'--rho',
type=float,
help='rho hyperparameter; '
'required by rms optimizer only',
default=None)
experiment = vars(parser.parse_args())
# check that the outfile doesn't already exist
if os.path.isfile(experiment['outfile']):
warnings.warn('outfile already exists; terminating\n')
sys.exit(0)
# check and process fold and phase structure arguments are specified correctly
assert(0 < experiment['fold_count'])
experiment['phases'] = [[int(i) for i in x] for x in experiment['phases'].split(':')]
experiment['digits'] = sorted(list(set(i for j in experiment['phases'] for i in j)))
assert(all([0 <= digit <= 9 for digit in experiment['digits']]))
experiment['train_folds'] = experiment['train_folds'].split('::')
if len(experiment['train_folds']) != len(experiment['phases']):
assert(len(experiment['train_folds']) == 1)
experiment['train_folds'] = [experiment['train_folds'][0] for _ in experiment['phases']]
experiment['train_folds'] = [sorted([int(j)for j in folds.split(':')])
for folds in experiment['train_folds']]
assert(len(experiment['train_folds']) == len(experiment['phases']))
assert(all([0 <= i < experiment['fold_count'] for i in sum(experiment['train_folds'], [])]))
if experiment['test_folds'] is not None:
experiment['test_folds'] = sorted([int(i)for i in experiment['test_folds'].split(':')])
assert(all([0 <= i < experiment['fold_count'] for i in experiment['test_folds']]))
assert(set(experiment['test_folds']).isdisjoint(sum(experiment['train_folds'], [])))
if experiment['test_on_all_digits']:
assert(experiment['test_folds'] is not None)
# check and process criteria arguments
if experiment['criteria'] == 'steps':
assert(experiment['steps'] is not None)
assert(experiment['required_accuracy'] is None)
assert(experiment['tolerance'] is None)
assert(experiment['validation_folds'] is None)
assert(experiment['minimum_steps'] is None)
assert(experiment['hold_steps'] is None)
assert(0 < experiment['steps'])
if experiment['criteria'] == 'offline':
assert(experiment['steps'] is None)
assert(experiment['required_accuracy'] is not None)
assert(experiment['tolerance'] is not None)
assert(experiment['validation_folds'] is not None)
assert(experiment['minimum_steps'] is None)
assert(experiment['hold_steps'] is None)
assert(0 < experiment['required_accuracy'] <= 1)
assert(0 < experiment['tolerance'])
experiment['validation_folds'] = \
sorted([int(i)for i in experiment['validation_folds'].split(':')])
assert(all([0 <= i < experiment['fold_count'] for i in experiment['validation_folds']]))
assert(set(experiment['validation_folds']).isdisjoint(sum(experiment['train_folds'], [])))
assert(set(experiment['validation_folds']).isdisjoint(experiment['test_folds']))
if experiment['criteria'] == 'online':
assert(experiment['steps'] is None)
assert(experiment['required_accuracy'] is not None)
if not experiment['prevent_repeats']:
assert(experiment['tolerance'] is not None)
assert(0 < experiment['tolerance'])
assert(experiment['validation_folds'] is None)
assert(experiment['minimum_steps'] is not None)
assert(experiment['hold_steps'] is not None)
assert(0 < experiment['required_accuracy'] <= 1)
assert(0 <= experiment['minimum_steps'])
assert(0 <= experiment['hold_steps'])
# check that optimizer arguments are specified correctly
if experiment['optimizer'] == 'sgd':
assert(experiment['momentum'] is not None)
assert(experiment['beta_1'] is None)
assert(experiment['beta_2'] is None)
assert(experiment['rho'] is None)
if experiment['optimizer'] == 'adam':
assert(experiment['momentum'] is None)
assert(experiment['beta_1'] is not None)
assert(experiment['beta_2'] is not None)
assert(experiment['rho'] is None)
if experiment['optimizer'] == 'rms':
assert(experiment['momentum'] is None)
assert(experiment['beta_1'] is None)
assert(experiment['beta_2'] is None)
assert(experiment['rho'] is not None)
# check and process all other arguments
assert(0 < experiment['log_frequency'])
# args ok; start experiment
experiment['start_time'] = datetime.datetime.now(datetime.timezone.utc).astimezone().isoformat()
# stop the annoying deprecation warnings when loading tensorflow
warnings.filterwarnings('ignore')
import copy
import json
import numpy as np
import tensorflow as tf
import torch
# setup libraries
torch.set_num_threads(1)
try:
tf.logging.set_verbosity('FATAL')
except AttributeError:
pass
if experiment['init_seed'] is not None:
torch.manual_seed(experiment['init_seed'])
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# load dataset and masks
if experiment['dataset'] == 'mnist':
(raw_x_train, raw_y_train), (raw_x_test, raw_y_test) = \
tf.keras.datasets.mnist.load_data()
masks = np.load('mnist_masks.npy', allow_pickle=True)
else:
assert experiment['dataset'] == 'fashion_mnist'
(raw_x_train, raw_y_train), (raw_x_test, raw_y_test) = \
tf.keras.datasets.fashion_mnist.load_data()
masks = np.load('fashion_mnist_masks.npy', allow_pickle=True)
raw_x_train, raw_x_test = raw_x_train / 255.0, raw_x_test / 255.0
raw_x_test, raw_y_test = None, None # disable use of the holdout dataset
# build masks
assert(masks.shape[0] > experiment['fold_count']) # make sure we're not touching the holdout
def build_mask(digits, folds):
return np.array(sum([masks[fold][digit] for fold in folds for digit in digits]), dtype=bool)
train_masks = [build_mask(phase, folds)
for phase, folds in zip(experiment['phases'], experiment['train_folds'])]
if experiment['test_folds'] is not None:
test_masks = [build_mask(phase, experiment['test_folds'])
for phase in experiment['phases']]
if experiment['test_on_all_digits']:
test_masks.append(build_mask(experiment['digits'], experiment['test_folds']))
if experiment['validation_folds'] is not None:
validation_masks = [build_mask(phase, experiment['validation_folds'])
for phase in experiment['phases']]
# build datasets
def shuffle_jointly(x, y):
z = list(zip(x, y))
np.random.RandomState(seed=experiment['shuffle_seed']).shuffle(z)
x, y = zip(* z)
return x, y
x_train = [raw_x_train[mask, ...]
for mask in train_masks]
y_train = [raw_y_train[mask, ...] - min(experiment['digits'])
for mask in train_masks]
for i in range(len(x_train)):
x_train[i], y_train[i] = shuffle_jointly(x_train[i], y_train[i])
x_train[i] = torch.tensor(x_train[i], dtype=torch.float).flatten(start_dim=1)
y_train[i] = torch.tensor(y_train[i], dtype=torch.int)
if experiment['test_folds'] is not None:
x_test = [raw_x_train[mask, ...]
for mask in test_masks]
y_test = [raw_y_train[mask, ...] - min(experiment['digits'])
for mask in test_masks]
mask = np.zeros(len(y_test[-1]), dtype=bool)
    digit_counts = np.zeros(max(raw_y_train) + 1)  # one counter per possible digit label
for i, digit in enumerate(y_test[-1]):
if (digit in experiment['digits']) and (digit_counts[digit] < 10):
mask[i] = True
digit_counts[digit] += 1
interference_x_test = x_test[-1][mask, ...] # smaller dataset for second order tests
interference_y_test = y_test[-1][mask, ...]
for i in range(len(x_test)):
x_test[i] = torch.tensor(x_test[i], dtype=torch.float).flatten(start_dim=1)
y_test[i] = torch.tensor(y_test[i], dtype=torch.int)
interference_x_test = torch.tensor(interference_x_test, dtype=torch.float).flatten(start_dim=1)
interference_y_test = torch.tensor(interference_y_test, dtype=torch.int)
if experiment['validation_folds'] is not None:
x_validation = [raw_x_train[mask, ...]
for mask in validation_masks]
y_validation = [raw_y_train[mask, ...] - min(experiment['digits'])
for mask in validation_masks]
for i in range(len(x_validation)):
x_validation[i] = torch.tensor(x_validation[i], dtype=torch.float).flatten(start_dim=1)
y_validation[i] = torch.tensor(y_validation[i], dtype=torch.int)
# build model
dtype = torch.float
linear1 = torch.nn.Linear(28 * 28, 100)
relu1 = torch.nn.ReLU()
linear2 = torch.nn.Linear(100, len(experiment['digits']))
torch.nn.init.normal_(linear1.weight, std=0.1)
torch.nn.init.normal_(linear1.bias, std=0.1)
torch.nn.init.normal_(linear2.weight, std=0.1)
torch.nn.init.normal_(linear2.bias, std=0.1)
model = torch.nn.Sequential(
linear1,
relu1,
linear2
)
loss_fn = torch.nn.CrossEntropyLoss()
# prepare optimizer
if experiment['optimizer'] == 'sgd':
optimizer = torch.optim.SGD(
model.parameters(),
lr=experiment['lr'],
momentum=experiment['momentum']
)
elif experiment['optimizer'] == 'rms':
optimizer = torch.optim.RMSprop(
model.parameters(),
lr=experiment['lr'],
alpha=experiment['rho']
)
else:
assert(experiment['optimizer'] == 'adam')
optimizer = torch.optim.Adam(
model.parameters(),
lr=experiment['lr'],
betas=(experiment['beta_1'], experiment['beta_2'])
)
# prepare buffers to store results
experiment['success'] = True
experiment['correct'] = list()
experiment['phase_length'] = list()
experiment['accuracies'] = None if experiment['test_folds'] is None else list()
experiment['predictions'] = None if experiment['test_folds'] is None else list()
experiment['activation_similarity'] = None if experiment['test_folds'] is None else list()
experiment['pairwise_interference'] = None if experiment['test_folds'] is None else list()
# create helper functions for metrics
@torch.no_grad()
def test_accuracies():
rv = list()
for phase in range(len(x_test)):
x = x_test[phase]
y_pred = model(x).argmax(axis=1)
y = y_test[phase]
accuracy = (y_pred == y).int().float().mean().item()
rv.append(accuracy)
return rv
@torch.no_grad()
def test_predictions():
rv = torch.zeros((len(experiment['digits']), len(experiment['digits'])), dtype=torch.float)
predictions = model(x_test[-1]).argmax(axis=1)
for i in range(len(experiment['digits'])):
for j in range(len(experiment['digits'])):
y_pred = predictions + min(experiment['digits']) == experiment['digits'][i]
y = y_test[-1] + min(experiment['digits']) == experiment['digits'][j]
rv[i, j] = ((y_pred & y).int().sum().float() / y.sum().float()).item()
return rv.tolist()
@torch.no_grad()
def test_activation_similarity():
activations = list()
for i in range(len(interference_x_test)):
activations.append(
(relu1.forward(
linear1.forward(
interference_x_test[i, :]))).numpy())
mean, count = 0, 0
for i in range(len(activations)):
for j in range(i, len(activations)):
value = np.dot(activations[i], activations[j])
count += 1
mean += (value - mean) / count
return float(mean)
@torch.no_grad()
def _interference_test():
return ((interference_y_test - model(interference_x_test).argmax(1)) ** 2).numpy()
def test_pairwise_interference():
pre_performance = np.tile(_interference_test(), (len(interference_x_test), 1))
post_performance = np.zeros_like(pre_performance)
state_dict = copy.deepcopy(model.state_dict())
optimizer_state_dict = copy.deepcopy(optimizer.state_dict())
for i in range(len(interference_x_test)):
y_pred = model(interference_x_test[i, :]).double()
y = interference_y_test[i].long()
loss = loss_fn(y_pred.unsqueeze(0), y.unsqueeze(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
post_performance[i, :] = _interference_test()
model.load_state_dict(copy.deepcopy(state_dict))
optimizer.load_state_dict(copy.deepcopy(optimizer_state_dict))
return float(
|
np.mean(post_performance - pre_performance)
|
numpy.mean
|
"""Reproduction of the algorithm presented by paper "Multi-sphere approximation of real particles for \
DEM simulation based on a modified greedy heuristic algorithm (https://doi.org/10.1016/j.powtec.2015.08.026)".
@author: chuan
"""
import numpy as np
from sklearn.decomposition import PCA
from skimage import io
import matplotlib.pyplot as plt
class CellCollection:
"""Just a data structure of the cell collection.
"""
def __init__(self, sandPointCloud, nX=40):
self.nX = nX
pcaReturns = CellCollection._pcaTransfer(sandPointCloud)
self.pcaModel = pcaReturns[0]
self.pcaPoints = pcaReturns[1]
self.l, self.w = pcaReturns[2]
self.lEnds, self.wEnds = pcaReturns[3]
self.r = self.l / self.nX / 2
        self.nY = int(self.w / self.l * self.nX)  # rounding or floor division? the partitioning scheme still needs to be clarified
        # cellCoords holds the coordinates of each cell's center point
self.cellBox, self.cellInd, self.cellCoords = self._getCells()
self.boundaryCoords, self.innerCoords = self._boundaryDetect()
@classmethod
def _pcaTransfer(cls, sandPointCloud):
pcaModel = PCA(n_components=2)
pcaModel = pcaModel.fit(sandPointCloud)
pcaPoints = pcaModel.transform(sandPointCloud)
l = pcaPoints[:, 0].max() - pcaPoints[:, 0].min()
w = pcaPoints[:, 1].max() - pcaPoints[:, 1].min()
assert l >= w
# [l, w]Ends denotes the two ends of the line along l, w axis
lEnds = (pcaPoints[:, 0].min(), pcaPoints[:, 0].max())
wEnds = (pcaPoints[:, 1].min(), pcaPoints[:, 1].max())
return pcaModel, pcaPoints, [l, w], [lEnds, wEnds]
def _getCuboid(self):
cuboidVerticesL, cuboidVerticesW, cuboidVerticesH = np.meshgrid(
self.lEnds, self.wEnds, self.hEnds)
return cuboidVerticesL, cuboidVerticesW, cuboidVerticesH
def _getCells(self):
        # rename locally to keep the following code concise
nX, nY = self.nX, self.nY
r = self.r
lEnds, wEnds = self.lEnds, self.wEnds
pcaPoints = self.pcaPoints
        # Indexing convention for the cellBox array: coordinate (lEnds[0], wEnds[0], hEnds[0]) corresponds to index (0, 0, 0)
# clct: collection, ind: index, ctr: center
cellBox =
|
np.zeros((nX, nY), dtype=np.uint8)
|
numpy.zeros
|
# -*- coding: utf-8 -*-
"""Plotting.py for notebook 04_Modelling_AGN_fractions_from_literature
This python file contains all the functions used for modelling the AGN fraction based on measurements from literature
Script written by: <NAME>
Project supervised by <NAME>
Date created: 20th April 2021
"""
# astropy modules
import astropy.units as u
import astropy.io.fits as fits
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatLambdaCDM, z_at_value
import numpy as np
import seaborn as sns
# scipy modules
import scipy.odr as odr
from scipy import interpolate
import os
import sys
import importlib
# plotting imports
import matplotlib
import matplotlib.pyplot as plt
sys.path.append('../imported_files/')
import Agn_incidence_from_Major_Mergers as aimm
import All_sky as sky
import Comparison_simulation_with_literature_data as cswl
import plotting_aim03 as pt
"""
Functions begin
"""
def getXY(arr):
"Function to get x and y arr from the parent arr"
x = [np.vstack(arr)[i][0] for i in range(len(arr))]
y = [np.vstack(arr)[i][1] for i in range(len(arr))]
return x, y
def getErrArrays(x):
"Function to get the error arrays from the parent array"
x_err_arr = [x[i][:2] for i in range(len(x))]
y_err_arr = [x[i][2:] for i in range(len(x))]
return x_err_arr, y_err_arr
def getErr(x_err_arr, x, y_err_arr, y):
"Function to transform the errors arrays into readable pyplot formats"
xerr = np.abs(np.transpose(np.vstack(x_err_arr)) - x)
yerr = np.abs(np.transpose(np.vstack(y_err_arr)) - y)
return xerr, yerr
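# Minimal sketch (toy numbers, added for illustration): each *_err_arr entry holds the
# absolute [lower, upper] bound of a point; getErr() converts them into the 2xN arrays
# of offsets that plt.errorbar expects.
def _example_err_format():
    x, y = [1.0, 2.0], [10.0, 20.0]
    x_err_arr = [[0.8, 1.3], [1.9, 2.4]]
    y_err_arr = [[9.0, 11.0], [18.0, 23.0]]
    return getErr(x_err_arr, x, y_err_arr, y)  # xerr = [[0.2, 0.1], [0.3, 0.4]]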
def powerLaw(beta, x):
return -beta[0]/np.power(x, beta[1])
def performODR(X, Y, xerr_all, yerr_all, func=powerLaw):
"Function to fit the empirical data"
# model object
power_law_model = odr.Model(func)
# data and reverse data object
data = odr.RealData(X, Y, sx=xerr_all, sy=yerr_all)
# odr with model and data
myodr = odr.ODR(data, power_law_model, beta0=[0.2, 0.])
out = myodr.run()
out = myodr.restart(iter=1000)
return out
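# Hedged usage sketch (synthetic points, added for illustration only): fit the
# -beta[0] / x**beta[1] power law to noiseless data with nominal 10% errors on both
# axes; the recovered out.beta should land close to the generating [0.2, 0.5].
def _example_power_law_fit():
    x_demo = np.linspace(1.0, 5.0, 10)
    y_demo = powerLaw([0.2, 0.5], x_demo)
    out = performODR(x_demo, y_demo, 0.1 * x_demo, 0.1 * np.abs(y_demo))
    return out.beta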
def getAGNlengths(redshift_limit=2, frac_cp_agn=0.03, all=True):
agn_lens = np.load('../Data/all_sky_agn_lengths_z%.1f_fracCP_%.2f.npy'%(redshift_limit, frac_cp_agn), allow_pickle=True)
rand_agn = np.array([agn_lens[i][0] for i in range(len(agn_lens))])
if all:
cp_agn = np.array([agn_lens[i][1] for i in range(len(agn_lens))])
else:
cp_agn = np.array([agn_lens[i][1] - rand_agn[i] for i in range(len(agn_lens))])
print(rand_agn[-2:-1])
return rand_agn, cp_agn
def loadPairsAllSky(r_p, end_points, data_dir, pixel_no_cont_arr, frac_cp_agn=0.03, cp_agn=True):
n_pairs_separation = np.zeros((0, len(r_p)))
for i, e in enumerate(end_points[:-1]):
ll, ul = pixel_no_cont_arr[int(e)], pixel_no_cont_arr[int(end_points[i+1]-1)]
if cp_agn:
n_pairs = np.load(data_dir+'np_tmm2_xoff6_pixels_%s-%s_fracAGN%.2f.npy'%(ll, ul, frac_cp_agn), allow_pickle=True)
else:
n_pairs = np.load(data_dir+'np_pixels_%s-%s.npy'%(ll, ul), allow_pickle=True)
n_pairs_separation = np.vstack((n_pairs_separation, n_pairs))
return n_pairs_separation
def openAGNhaloPairsFile(file_divide=16, redshift_limit=0.2, num_rp_bins=12, last_px=750,\
old_agn_pairs=True, frac_cp_agn=0.03):
"""
@file_divide :: number of files into which the pair counting is divided for the all sky
    @redshift_limit :: upper redshift limit used when looking back
    @num_rp_bins :: number of r_p bins used when counting pairs
"""
# get shell volume and projected radius bins [Mpc]
r_p, shell_volume = aimm.shellVolume()
r_p_half, shell_volume_half = aimm.shellVolume(num_bins=num_rp_bins )
# pixel number from the simulation file
pixel_no_cont_arr = sky.allPixelNames()
end_points = np.linspace(0, last_px, file_divide)
end_points = np.append(end_points, [767], axis=None)
halo_lens = np.load('../Data/all_sky_halo_lengths_z%.1f.npy'%redshift_limit)
rand_agn, cp_agn = getAGNlengths(redshift_limit=redshift_limit, frac_cp_agn=frac_cp_agn, all=False)
data_cp_dir = '../Data/pairs_z%.1f/cat_AGN_halo_pairs/'%redshift_limit
# get the total number of possible AGN-halo pairs
lens_rand, lens_cp = np.array([halo_lens, rand_agn]), np.array([halo_lens, cp_agn])
tot_p_rand_agn = cswl.GammaDenominator(lens_rand, 0, -1, num_sets=2)
tot_p_cp_agn = cswl.GammaDenominator(lens_cp, 0, -1, num_sets=2)
# repeat same process for old catAGN (without close pairs)
if old_agn_pairs:
r_p_half, shell_volume_half = aimm.shellVolume(num_bins=num_rp_bins )
data_dir = data_cp_dir + 'cat_without_CP/'
n_p_sep = loadPairsAllSky(r_p_half, end_points, data_dir, pixel_no_cont_arr, cp_agn=False)
print(tot_p_rand_agn.shape)
frac_rand_agn_mean = [np.mean(n_p_sep[:, i]/tot_p_rand_agn) for i in range(len(r_p_half))]
frac_rand_agn_std = [np.std(n_p_sep[:, i]/tot_p_rand_agn) for i in range(len(r_p_half))]
mean_gamma_rand, std_gamma_rand = frac_rand_agn_mean[1:]/shell_volume_half, frac_rand_agn_std[1:]/shell_volume_half
n_pairs_sep = loadPairsAllSky(r_p, end_points, data_cp_dir, pixel_no_cont_arr, frac_cp_agn=frac_cp_agn)
# get mean and std of the number density
frac_cp_agn_mean = [np.mean(n_pairs_sep[:, i]/tot_p_cp_agn) for i in range(len(r_p))]
frac_cp_agn_std = [np.std(n_pairs_sep[:, i]/tot_p_cp_agn) for i in range(len(r_p))]
mean_gamma_cp, std_gamma_cp = frac_cp_agn_mean[1:]/shell_volume, frac_cp_agn_std[1:]/shell_volume
names_cp, names_rand = ['Gamma_mean_CP', 'Gamma_std_CP'], ['Gamma_mean_RAND', 'Gamma_std_RAND']
return Table([mean_gamma_cp, std_gamma_cp], names=names_cp), Table([mean_gamma_rand, std_gamma_rand], names=names_rand)
def getFracAgnHaloPairsCp(ax, frac_cp_agn_arr, z=1, num_rp_bins=12):
"""
Function to get the fraction of pairs
"""
# get shell volume and projected radius bins [Mpc]
r_p, _ = aimm.shellVolume()
r_p_half, _ = aimm.shellVolume(num_bins=num_rp_bins )
pal = sns.color_palette("viridis", 4).as_hex()
data_dir = '../Data/pairs_z%.1f/Major_dv_pairs/'%1
gamma_all = np.load(data_dir+'gamma_all_pixels.npy', allow_pickle=True)
f_cp_agn_halo_pairs, f_rand_agn_halo_pairs = np.zeros((0, len(r_p)-1)), np.zeros((0, len(r_p_half)-1))
for f, frac_cp_agn in enumerate(frac_cp_agn_arr):
# read the files that count pairs (AGN-halo and halo-halo) for the new catAGN and old catAGN
g_cp, g_rand = openAGNhaloPairsFile(redshift_limit=z, frac_cp_agn=frac_cp_agn, num_rp_bins=num_rp_bins)
        # plot the results and the changes w.r.t. the old catAGN
gamma_all_inter = pt.plotChangesCatAGN(ax, g_cp, g_rand, label_idx = f, num_rp_bins=num_rp_bins, \
redshift_limit=z, c=pal[f], frac_cp_agn=frac_cp_agn)
# append these values
cols_cp0 = Column(data=gamma_all[0], name='Gamma_meanALL')
cols_cp1 = Column(data=gamma_all[1], name='Gamma_stdALL' )
cols_rand0 = Column(data=gamma_all_inter[0], name='Gamma_meanALL' )
cols_rand1 = Column(data=gamma_all_inter[1], name='Gamma_stdALL' )
g_cp.add_columns([cols_cp0, cols_cp1])
g_rand.add_columns([cols_rand0,cols_rand1])
return g_cp, g_rand
def combineFracTmmXoff(t0, std_t0, frac_xoff_z2, num_decs=20):
models_all = np.zeros((0, len(t0) ))
std_all = np.zeros((0, len(t0) ))
for d in np.arange(0, num_decs, 2):
x, std_x = frac_xoff_z2.columns[d], frac_xoff_z2.columns[d+1]
models_all = np.append(models_all, [t0+x], axis=0)
std = np.sqrt(std_x**2 + std_t0**2)
std_all = np.append(std_all, [std], axis=0)
return models_all, std_all
def generateDecileModels(frac_tmm, frac_xoff, num_decs=20):
models = np.zeros((0, len(frac_tmm)))
std = np.zeros((0, len(frac_tmm)))
for d in np.arange(0, num_decs, 2):
# best possible models for z<2
tmm, std_tmm = frac_tmm.columns[d], frac_tmm.columns[d+1]
models_tmm, std_tmm = combineFracTmmXoff(tmm, std_tmm, frac_xoff, num_decs=num_decs)
models =
|
np.append(models, models_tmm, axis=0)
|
numpy.append
|
import math
import numpy as np
def sind(deg):
rad = math.radians(deg)
return math.sin(rad)
def cosd(deg):
rad = math.radians(deg)
return math.cos(rad)
def tand(deg):
rad = math.radians(deg)
return math.tan(rad)
def cotd(deg):
rad = math.radians(deg)
return math.cos(rad) / math.sin(rad)
def asind(x):
rad = math.asin(x)
return math.degrees(rad)
def acosd(x):
rad = math.acos(x)
return math.degrees(rad)
def atand(x):
rad = math.atan(x)
return math.degrees(rad)
def km2deg(kilometers):
return kilometers / 111.19
def deg2km(degree):
return degree * 111.19
class distaz:
"""
c Subroutine to calculate the Great Circle Arc distance
c between two sets of geographic coordinates
c
    c Equations taken from Bullen, pages 154, 155
c
c <NAME>, September 19, 1991
c Sept. 25 -- fixed az and baz calculations
c
    <NAME>, September 27, 1995
Converted to c to fix annoying problem of fortran giving wrong
answers if the input doesn't contain a decimal point.
<NAME>, September 18, 1997
Java version for direct use in java programs.
*
* <NAME>, May 4, 2004
* Added enough convenience constructors to choke a horse and made public double
* values use accessors so we can use this class as an immutable
<NAME>, May 31, 2006
Port to python, thus adding to the great list of languages to which
    distaz has been ported from the original fortran: C, Tcl, Java and now python
and I vaguely remember a perl port. Long live distaz!
"""
def __init__(self, lat1, lon1, lat2, lon2):
self.stalat = lat1
self.stalon = lon1
self.evtlat = lat2
self.evtlon = lon2
'''
if (lat1 == lat2) and (lon1 == lon2):
self.delta = 0.0
self.az = 0.0
self.baz = 0.0
return
'''
rad = 2. * math.pi / 360.0
"""
c
c scolat and ecolat are the geocentric colatitudes
c as defined by Richter (pg. 318)
c
        c Earth Flattening of 1/298.257 taken from Bott (pg. 3)
c
"""
sph = 1.0 / 298.257
scolat = math.pi / 2.0 - np.arctan((1. - sph) * (1. - sph) * np.tan(lat1 * rad))
ecolat = math.pi / 2.0 - np.arctan((1. - sph) * (1. - sph) * np.tan(lat2 * rad))
slon = lon1 * rad
elon = lon2 * rad
"""
c
c a - e are as defined by Bullen (pg. 154, Sec 10.2)
c These are defined for the pt. 1
c
"""
a = np.sin(scolat) * np.cos(slon)
b = np.sin(scolat) * np.sin(slon)
c = np.cos(scolat)
d = np.sin(slon)
e = -np.cos(slon)
g = -c * e
h = c * d
k = -np.sin(scolat)
"""
c
c aa - ee are the same as a - e, except for pt. 2
c
"""
aa =
|
np.sin(ecolat)
|
numpy.sin
|
import warnings
warnings.filterwarnings('ignore')
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from chainer import Chain
from chainer.backends import cuda
from sklearn.metrics import recall_score
from functools import partial
class MyClassifier(Chain):
prior = 0
it_position = None
def __call__(self, x, t, loss_func):
self.clear()
h = self.calculate(x)
self.loss = loss_func(h, t)
chainer.reporter.report({'loss': self.loss}, self)
return self.loss
def clear(self):
self.loss = None
def calculate(self, x):
return None
def call_reporter(self, dictionary):
chainer.reporter.report(dictionary, self)
def error(self, x, t):
warnings.filterwarnings("ignore")
xp = cuda.get_array_module(x, False)
size = len(t)
with chainer.no_backprop_mode():
with chainer.using_config("train", False):
h = xp.reshape(xp.sign(self.calculate(x).data), size)
if isinstance(h, chainer.Variable):
h = h.data
if isinstance(t, chainer.Variable):
t = t.data
result = (h != t).sum() / size
t, h = t.get(), h.get()
h_separated = ','.join([str(x) for x in h]) + '\n'
h_separated = str(self.it_position) + ',' + h_separated
with open('result/preds.csv', 'a') as f:
f.write(h_separated)
assert h.shape[0] == t.shape[0]
# Calculate partial recall
recall = recall_score(t, h)
# Calculate perc pos and perc pos non fake
h_pos_idx = np.where(h == 1)[0]
perc_pos = h_pos_idx.shape[0]/h.shape[0] if h.shape[0] > 0 else 0.
if len(h_pos_idx) > 0:
perc_pos_nf =
|
np.unique(t[h_pos_idx], return_counts=True)
|
numpy.unique
|
from terraingrid import TerrainGrid
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score, precision_score, recall_score
import numpy as np
import cv2 as cv
def pixel_accuracy(eval_segm, gt_segm):
'''
sum_i(n_ii) / sum_i(t_i)
'''
check_size(eval_segm, gt_segm)
cl, n_cl = extract_classes(gt_segm)
eval_mask, gt_mask = extract_both_masks(eval_segm, gt_segm, cl, n_cl)
sum_n_ii = 0
sum_t_i = 0
for i, c in enumerate(cl):
curr_eval_mask = eval_mask[i, :, :]
curr_gt_mask = gt_mask[i, :, :]
sum_n_ii += np.sum(np.logical_and(curr_eval_mask, curr_gt_mask))
sum_t_i += np.sum(curr_gt_mask)
if (sum_t_i == 0):
pixel_accuracy_ = 0
else:
pixel_accuracy_ = sum_n_ii / sum_t_i
return pixel_accuracy_
def mean_accuracy(eval_segm, gt_segm):
'''
(1/n_cl) sum_i(n_ii/t_i)
'''
check_size(eval_segm, gt_segm)
cl, n_cl = extract_classes(gt_segm)
eval_mask, gt_mask = extract_both_masks(eval_segm, gt_segm, cl, n_cl)
accuracy = list([0]) * n_cl
for i, c in enumerate(cl):
curr_eval_mask = eval_mask[i, :, :]
curr_gt_mask = gt_mask[i, :, :]
n_ii = np.sum(np.logical_and(curr_eval_mask, curr_gt_mask))
t_i = np.sum(curr_gt_mask)
if (t_i != 0):
accuracy[i] = n_ii / t_i
mean_accuracy_ = np.mean(accuracy)
return mean_accuracy_
def mean_IU(eval_segm, gt_segm):
'''
(1/n_cl) * sum_i(n_ii / (t_i + sum_j(n_ji) - n_ii))
'''
check_size(eval_segm, gt_segm)
cl, n_cl = union_classes(eval_segm, gt_segm)
_, n_cl_gt = extract_classes(gt_segm)
eval_mask, gt_mask = extract_both_masks(eval_segm, gt_segm, cl, n_cl)
IU = list([0]) * n_cl
for i, c in enumerate(cl):
curr_eval_mask = eval_mask[i, :, :]
curr_gt_mask = gt_mask[i, :, :]
if (np.sum(curr_eval_mask) == 0) or (
|
np.sum(curr_gt_mask)
|
numpy.sum
|
# Modifications to GRIDLOD for nonlinear Helmholtz setup
# Copyright holders: <NAME>, <NAME>
# 2020
import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg
import gc
import warnings
from gridlod import fem
from gridlod import util
from gridlod import linalg
from gridlod import coef
from gridlod import transport
from gridlod import interp
from gridlod import world
# Saddle point problem solver
class DirectSolver:
def __init__(self):
pass
def solve(self, A, I, bList, fixed, NPatchCoarse=None, NCoarseElement=None):
return saddleDirectComplex(A, I, bList, fixed)
def saddleDirectComplex(A, B, rhsList, fixed):
A = linalg.imposeBoundaryConditionsStronglyOnMatrix(A, fixed) #
rhsList = [linalg.imposeBoundaryConditionsStronglyOnVector(rhs, fixed) for rhs in rhsList] #
B = linalg.imposeBoundaryConditionsStronglyOnInterpolation(B, fixed) #
K = sparse.bmat([[A, B.T],
[B, None]], format='csc',dtype='complex128')
xList = []
for rhs in rhsList:
b = np.zeros(K.shape[0],dtype='complex128')
b[:np.size(rhs)] = rhs
xAll = sparse.linalg.spsolve(K, b, use_umfpack=True)
xList.append(xAll[:np.size(rhs)])
if(np.any(np.absolute(B*xAll[:np.size(rhs)]) > 1e-10)):
print(np.abs(B*xAll[:np.size(rhs)]))
return xList
def ritzProjectionToFinePatch(patch,
APatchFull,
bPatchFullList,
IPatch,
saddleSolver=None):
if saddleSolver is None:
saddleSolver = DirectSolver() # Fast for small patch problems
world = patch.world
d = np.size(patch.NPatchCoarse)
NPatchFine = patch.NPatchFine
NpFine = patch.NpFine
# Find what patch faces are common to the world faces, and inherit
# boundary conditions from the world for those. For the other
# faces, all DoFs fixed (Dirichlet)
boundaryMapWorld = world.boundaryConditions==0
inherit0 = patch.iPatchWorldCoarse==0
inherit1 = (patch.iPatchWorldCoarse + patch.NPatchCoarse)==world.NWorldCoarse
boundaryMap = np.ones([d, 2], dtype='bool')
boundaryMap[inherit0,0] = boundaryMapWorld[inherit0,0]
boundaryMap[inherit1,1] = boundaryMapWorld[inherit1,1]
# Using schur complement solver for the case when there are no
# Dirichlet conditions does not work. Fix if necessary.
fixed = util.boundarypIndexMap(NPatchFine, boundaryMap)
projectionsList = saddleSolver.solve(APatchFull, IPatch, bPatchFullList, fixed, patch.NPatchCoarse, world.NCoarseElement)
return projectionsList
class CoarseScaleInformation_helmholtz:
def __init__(self, Kij, Kmsij, muTPrime, Mij, Mmsij, Bdij, Bdmsij):
self.Kij = Kij
self.Kmsij = Kmsij
self.muTPrime = muTPrime
self.Mij = Mij
self.Mmsij = Mmsij
self.Bdij = Bdij
self.Bdmsij = Bdmsij
def computeElementCorrector_helmholtz(patch, IPatch, aPatch, kPatch, k2Patch, ARhsList=None, MRhsList=None, saddleSolver=None):
'''Compute the fine correctors over a patch.
Compute the correctors
B( Q_T_j, vf)_{U_K(T)} = B( ARhs_j, vf)_{T} + (MRhs_j, vf)_{T}
where B is the sesquilinear form associated with the linear Helmholtz eq.
'''
while callable(IPatch):
IPatch = IPatch()
while callable(aPatch):
aPatch = aPatch()
while callable(kPatch):
kPatch = kPatch()
while callable(k2Patch):
k2Patch = k2Patch()
assert(ARhsList is not None or MRhsList is not None)
numRhs = None
if ARhsList is not None:
assert(numRhs is None or numRhs == len(ARhsList))
numRhs = len(ARhsList)
if MRhsList is not None:
assert(numRhs is None or numRhs == len(MRhsList))
numRhs = len(MRhsList)
world = patch.world
NCoarseElement = world.NCoarseElement
NPatchCoarse = patch.NPatchCoarse
d = np.size(NCoarseElement)
NPatchFine = NPatchCoarse*NCoarseElement
NtFine = np.prod(NPatchFine)
NpFineCoarseElement = np.prod(NCoarseElement+1)
NpCoarse = np.prod(NPatchCoarse+1)
NpFine = np.prod(NPatchFine+1)
assert(aPatch.shape[0] == NtFine)
assert(aPatch.ndim == 1 or aPatch.ndim == 3)
assert(kPatch.ndim == 1)
assert(k2Patch.ndim == 1)
if aPatch.ndim == 1:
ALocFine = world.ALocFine
elif aPatch.ndim == 3:
ALocFine = world.ALocMatrixFine
MLocFine = world.MLocFine
BdLocFine = fem.localBoundaryMassMatrixGetter(NCoarseElement*world.NWorldCoarse)
iElementPatchCoarse = patch.iElementPatchCoarse
elementFinetIndexMap = util.extractElementFine(NPatchCoarse,
NCoarseElement,
iElementPatchCoarse,
extractElements=True)
elementFinepIndexMap = util.extractElementFine(NPatchCoarse,
NCoarseElement,
iElementPatchCoarse,
extractElements=False)
# global boundary?
bdMapWorld = world.boundaryConditions == 1
# on element
bdMapElement = np.zeros([d, 2], dtype='bool')
inheritElement0 = patch.iElementWorldCoarse == 0
inheritElement1 = (patch.iElementWorldCoarse + np.ones(d)) == world.NWorldCoarse
bdMapElement[inheritElement0, 0] = bdMapWorld[inheritElement0, 0]
bdMapElement[inheritElement1, 1] = bdMapWorld[inheritElement1, 1]
# on patch
inherit0 = patch.iPatchWorldCoarse == 0
inherit1 = (patch.iPatchWorldCoarse + NPatchCoarse) == world.NWorldCoarse
bdMapPatch = np.zeros([d, 2], dtype='bool')
bdMapPatch[inherit0, 0] = bdMapWorld[inherit0, 0]
bdMapPatch[inherit1, 1] = bdMapWorld[inherit1, 1]
if ARhsList is not None:
AElementFull = fem.assemblePatchMatrix(NCoarseElement, ALocFine, aPatch[elementFinetIndexMap])
k2MElementFull = fem.assemblePatchMatrix(NCoarseElement, MLocFine, k2Patch[elementFinetIndexMap])
kBdElementFull = fem.assemblePatchBoundaryMatrix(NCoarseElement, BdLocFine, kPatch[elementFinetIndexMap],bdMapElement)
if MRhsList is not None:
MElementFull = fem.assemblePatchMatrix(NCoarseElement, MLocFine)
APatchFull = fem.assemblePatchMatrix(NPatchFine, ALocFine, aPatch)
k2MPatchFull = fem.assemblePatchMatrix(NPatchFine, MLocFine, k2Patch)
kBdPatchFull = fem.assemblePatchBoundaryMatrix(NPatchFine, BdLocFine, kPatch, bdMapPatch)
SPatchFull = APatchFull - k2MPatchFull + 1j*kBdPatchFull
bPatchFullList = []
for rhsIndex in range(numRhs):
bPatchFull = np.zeros(NpFine,dtype='complex128')
if ARhsList is not None:
bPatchFull[elementFinepIndexMap] += (AElementFull - k2MElementFull + 1j*kBdElementFull)*ARhsList[rhsIndex]
if MRhsList is not None:
bPatchFull[elementFinepIndexMap] += MElementFull*MRhsList[rhsIndex]
bPatchFullList.append(bPatchFull)
correctorsList = ritzProjectionToFinePatch(patch,
SPatchFull,
bPatchFullList,
IPatch,
saddleSolver)
return correctorsList
def computeBasisCorrectors_helmholtz(patch, IPatch, aPatch, kPatch, k2Patch, saddleSolver=None):
'''Compute the fine basis correctors over the patch.
Compute the correctors Q_T\lambda_i:
B( Q_T lambda_j, vf)_{U_K(T)} = B( lambda_j, vf)_{T}
where B is the sesquilinear form associated with the linear Helmholtz eq.
'''
ARhsList = list(patch.world.localBasis.T)
return computeElementCorrector_helmholtz(patch, IPatch, aPatch, kPatch, k2Patch, ARhsList, saddleSolver=None)
def computeEftErrorIndicatorsCoarseFromGreeks(etaT, cetaTPrime, greeksPatch):
    '''Compute the coarse error indicator E(T) from the "greeks" delta and kappa,
where
deltaMaxTPrime = || ANew^{-1/2} (ANew - AOld) AOld^{-1/2} ||_max(TPrime)
over all coarse elements TPrime in the patch
kappaMaxT = || AOld^{1/2) ANew^{-1/2} ||_max(T)
over the patch coarse T (patch.iPatchWorldCoarse)
nuT = || f - f_ref ||_L2(T)
'''
while callable(greeksPatch):
greeksPatch = greeksPatch()
deltaMaxTPrime, kappaMaxT, xiMaxT, nuT, gammaT = greeksPatch
eft_square = xiMaxT**2 * nuT**2 * etaT
eRft_square = kappaMaxT**2 * np.sum((deltaMaxTPrime**2)*cetaTPrime)
return np.sqrt(eft_square), np.sqrt(eRft_square)
def computeErrorIndicatorCoarse_helmholtz(patch, muTPrime, aPatchOld, aPatchNew):
    ''' Compute the coarse error indicator E(T) with explicit values of AOld and ANew.
This requires muTPrime from CSI and the new and old coefficient.
'''
while callable(muTPrime):
muTPrime = muTPrime()
while callable(aPatchOld):
aPatchOld = aPatchOld()
while callable(aPatchNew):
aPatchNew = aPatchNew()
aOld = aPatchOld
aNew = aPatchNew
world = patch.world
NPatchCoarse = patch.NPatchCoarse
NCoarseElement = world.NCoarseElement
NPatchFine = NPatchCoarse*NCoarseElement
iElementPatchCoarse = patch.iElementPatchCoarse
elementCoarseIndex = util.convertpCoordIndexToLinearIndex(NPatchCoarse-1, iElementPatchCoarse)
TPrimeFinetStartIndices = util.pIndexMap(NPatchCoarse-1, NPatchFine-1, NCoarseElement)
TPrimeFinetIndexMap = util.lowerLeftpIndexMap(NCoarseElement-1, NPatchFine-1)
TPrimeIndices = np.add.outer(TPrimeFinetStartIndices, TPrimeFinetIndexMap)
aTPrime = aNew[TPrimeIndices]
aOldTPrime = aOld[TPrimeIndices]
deltaMaxTPrime = np.max(np.abs(aTPrime - aOldTPrime), axis=1)
epsilonTSquare = np.sum((deltaMaxTPrime ** 2) * muTPrime)
return np.sqrt(epsilonTSquare)
def performTPrimeLoop_helmholtz(patch, lambdasList, correctorsList, aPatch, kPatch, k2Patch, accumulate):
while callable(aPatch):
aPatch = aPatch()
while callable(kPatch):
kPatch = kPatch()
while callable(k2Patch):
k2Patch = k2Patch()
world = patch.world
NCoarseElement = world.NCoarseElement
NPatchCoarse = patch.NPatchCoarse
NPatchFine = NPatchCoarse*NCoarseElement
NTPrime = np.prod(NPatchCoarse)
NpPatchCoarse = np.prod(NPatchCoarse+1)
d = np.size(NPatchCoarse)
import matplotlib.pyplot as plt
import numpy as np
import torch
from matplotlib.animation import FuncAnimation, writers
np.random.seed(0)
def cost_dz(R_z, z, z_goal):
# compute the first-order derivative of latent cost w.r.t z
z_diff = np.expand_dims(z - z_goal, axis=-1)
return np.squeeze(2 * np.matmul(R_z, z_diff))
def cost_du(R_u, u):
# compute the first-order derivative of latent cost w.r.t u
return np.atleast_1d(np.squeeze(2 * np.matmul(R_u, np.expand_dims(u, axis=-1))))
def cost_dzz(R_z):
# compute the second-order derivative of latent cost w.r.t z
return 2 * R_z
def cost_duu(R_u):
# compute the second-order derivative of latent cost w.r.t u
return 2 * R_u
def cost_duz(z, u):
# compute the second-order derivative of latent cost w.r.t u and z
return np.zeros((u.shape[-1], z.shape[-1]))
def latent_cost(R_z, R_u, z_seq, z_goal, u_seq):
z_diff = np.expand_dims(z_seq - z_goal, axis=-1)
cost_z = np.squeeze(np.matmul(np.matmul(z_diff.transpose((0, 2, 1)), R_z), z_diff))
u_seq_reshaped = np.expand_dims(u_seq, axis=-1)
cost_u = np.squeeze(np.matmul(np.matmul(u_seq_reshaped.transpose((0, 2, 1)), R_u), u_seq_reshaped))
return np.sum(cost_z) + np.sum(cost_u)
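# A quick sanity check (a sketch with made-up shapes, not taken from the original
# config): for a single time step the quadratic latent cost above reduces to
# (z - z_goal)^T R_z (z - z_goal) + u^T R_u u.
def _latent_cost_sanity_check():
    z_seq, z_goal = np.ones((1, 2)), np.zeros(2)
    u_seq = np.ones((1, 1))
    R_z, R_u = np.eye(2), np.eye(1)
    return latent_cost(R_z, R_u, z_seq, z_goal, u_seq)  # 2.0 + 1.0 == 3.0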
def one_step_back(R_z, R_u, z, u, z_goal, A, B, V_prime_next_z, V_prime_next_zz, mu_inv_regulator):
"""
V_prime_next_z: first order derivative of the value function at time step t+1
V_prime_next_zz: second order derivative of the value function at time step t+1
A: derivative of F(z, u) w.r.t z at z_bar_t, u_bar_t
B: derivative of F(z, u) w.r.t u at z_bar_t, u_bar_t
"""
# compute Q_z, Q_u, Q_zz, Q_uu, Q_uz using cost function, A, B and V
Q_z = cost_dz(R_z, z, z_goal) + np.matmul(A.transpose(), V_prime_next_z)
Q_u = cost_du(R_u, u) + np.matmul(B.transpose(), V_prime_next_z)
Q_zz = cost_dzz(R_z) + np.matmul(np.matmul(A.transpose(), V_prime_next_zz), A)
Q_uz = cost_duz(z, u) + np.matmul(np.matmul(B.transpose(), V_prime_next_zz), A)
Q_uu = cost_duu(R_u) + np.matmul(np.matmul(B.transpose(), V_prime_next_zz), B)
# compute k and K matrix, add regularization to Q_uu
Q_uu_regularized = Q_uu + mu_inv_regulator * np.eye(Q_uu.shape[0])
Q_uu_in = np.linalg.inv(Q_uu_regularized)
k = -np.matmul(Q_uu_in, Q_u)
K = -np.matmul(Q_uu_in, Q_uz)
# compute V_z and V_zz using k and K
V_prime_z = Q_z + np.matmul(Q_uz.transpose(), k)
V_prime_zz = Q_zz + np.matmul(Q_uz.transpose(), K)
return k, K, V_prime_z, V_prime_zz
def backward(R_z, R_u, z_seq, u_seq, z_goal, A_seq, B_seq, inv_regulator):
"""
do the backward pass
return a sequence of k and K matrices
"""
# first and second order derivative of the value function at the last time step
V_prime_next_z = cost_dz(R_z, z_seq[-1], z_goal)
V_prime_next_zz = cost_dzz(R_z)
k, K = [], []
act_seq_len = len(u_seq)
for t in reversed(range(act_seq_len)):
k_t, K_t, V_prime_z, V_prime_zz = one_step_back(
R_z, R_u, z_seq[t], u_seq[t], z_goal, A_seq[t], B_seq[t], V_prime_next_z, V_prime_next_zz, inv_regulator
)
k.insert(0, k_t)
K.insert(0, K_t)
V_prime_next_z, V_prime_next_zz = V_prime_z, V_prime_zz
return k, K
def forward(z_seq, u_seq, k, K, dynamics, alpha):
"""
update the trajectory, given k and K
note: this version rolls the states forward with the learned dynamics model;
see the commented-out forward() below for an update that uses the linearization matrices (A and B) instead
"""
z_seq_new = []
z_seq_new.append(z_seq[0])
u_seq_new = []
for i in range(0, len(u_seq)):
u_new = u_seq[i] + alpha * k[i] + np.matmul(K[i], z_seq_new[i] - z_seq[i])
u_seq_new.append(u_new)
with torch.no_grad():
z_new = dynamics(torch.from_numpy(z_seq_new[i]).unsqueeze(0), torch.from_numpy(u_new).unsqueeze(0))[0].mean
z_seq_new.append(z_new.squeeze().numpy())
return np.array(z_seq_new), np.array(u_seq_new)
# def forward(u_seq, k_seq, K_seq, A_seq, B_seq, alpha):
# """
# update the trajectory, given k and K
# !!!! update using the linearization matrices (A and B), not the learned dynamics
# """
# u_new_seq = []
# plan_len = len(u_seq)
# z_dim = K_seq[0].shape[1]
# for i in range(0, plan_len):
# if i == 0:
# z_delta = np.zeros(z_dim)
# else:
# z_delta = np.matmul(A_seq[i-1], z_delta) + np.matmul(B_seq[i-1], u_delta)
# u_delta = alpha * (k_seq[i] + np.matmul(K_seq[i], z_delta))
# u_new_seq.append(u_seq[i] + u_delta)
# return np.array(u_new_seq)
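# A minimal sketch (not from the original code) of how backward() and forward()
# above compose into one iLQR iteration with a simple backtracking line search;
# the alpha schedule and the cost-comparison logic are assumptions for illustration.
def _ilqr_iteration_sketch(R_z, R_u, z_seq, u_seq, z_goal, A_seq, B_seq, dynamics,
                           inv_regulator=1e-3, alphas=(1.0, 0.5, 0.25)):
    k, K = backward(R_z, R_u, z_seq, u_seq, z_goal, A_seq, B_seq, inv_regulator)
    best_cost = latent_cost(R_z, R_u, z_seq, z_goal, u_seq)
    best = (z_seq, u_seq)
    for alpha in alphas:  # try progressively smaller steps, keep the best trajectory
        z_new, u_new = forward(z_seq, u_seq, k, K, dynamics, alpha)
        cost = latent_cost(R_z, R_u, z_new, z_goal, u_new)
        if cost < best_cost:
            best_cost, best = cost, (z_new, u_new)
    return best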
def get_x_data(mdp, state, config):
image_data = mdp.render(state).squeeze()
x_dim = config["obs_shape"]
if config["task"] == "plane":
x_dim = np.prod(x_dim)
x_data = torch.from_numpy(image_data).double().view(x_dim).unsqueeze(0)
elif config["task"] in ["swing", "balance"]:
x_dim = np.prod(x_dim)
import os
import sys
sys.path.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), '../'))
from sklearn.neighbors import DistanceMetric
from sklearn.preprocessing import normalize, MinMaxScaler
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
from matchers.matcher_base import MatcherBase
class PretrainedCnnMatcher(MatcherBase):
def __init__(self):
self.batch_size = 10
self.model = ResNet50(weights='imagenet',
include_top=False, pooling='avg')
self.dist = DistanceMetric.get_metric('euclidean')
self.min_max_scaler = MinMaxScaler()
def distance_metrics(self, scenes, is_norm=True):
feats = self.get_feats(scenes)
dist_metrics = self.dist.pairwise(feats, feats)
# print(dist_metrics)
if is_norm:
# dist_metrics = normalize(dist_metrics, axis=0, norm='l2')
# dist_metrics = self.min_max_scaler.fit_transform(dist_metrics)
dist_metrics = (dist_metrics - np.min(dist_metrics)) / (np.max(dist_metrics) - np.min(dist_metrics))
import sys
from pathlib import Path
file = Path(__file__).resolve()
root = file.parents[3]
sys.path.append(str(root))
if __name__ == "__main__":
import numpy
from coopihc.space.Space import Space
from coopihc.space.StateElement import StateElement
numpy.set_printoptions(precision=3, suppress=True)
# [start-stateelement-init]
x = StateElement(
values=None,
spaces=[
Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
"""
Modeling homework, problem 2
Conservative differential equations: accuracy and fidelity
"""
import numpy as np
from matplotlib import pyplot as plt
from scipy.integrate import solve_ivp
A = 6
def V(y):
return y**2 * (
np.log(y**2)
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.maps.mapmaking Contains the MapMaker class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import copy
import os
import numpy as np
# Import astronomical modules
from astropy.units import Unit
from photutils import detect_sources
# Import the relevant PTS classes and modules
from ...magic.basics.mask import Mask
from ...magic.core.image import Image
from ...magic.core.frame import Frame
from .component import MapsComponent
from ...core.tools import filesystem as fs
from ..decomposition.decomposition import load_parameters
from ...core.tools.logging import log
from .dust.buat import BuatDustMapMaker
from .dust.cortese import CorteseDustMapMaker
from .dust.blackbody import BlackBodyDustMapMaker
from .stars.old import OldStellarMapMaker
from .stars.young import YoungStellarMapMaker
from .stars.ionizing import IonizingStellarMapMaker
# -----------------------------------------------------------------
class MapMaker(MapsComponent):
"""
This class...
"""
def __init__(self, config=None):
"""
The constructor ...
:param config:
:return:
"""
# Call the constructor of the base class
super(MapMaker, self).__init__(config)
# -- Attributes --
# The structural galaxy parameters
self.parameters = None
self.distance_mpc = None
# Input images
self.images = dict()
# Cutoff masks
self.cutoff_masks = dict()
# Bulge and disk
self.disk = None
self.bulge = None
# Set the low signal-to-noise mask to None initially
self.mask = None
# The map of dust
self.dust = None
# The map of old stars
self.old_stars = None
# The map of young stars
self.young_stars = None
# The map of ionizing stars
self.ionizing_stars = None
# -----------------------------------------------------------------
@classmethod
def from_arguments(cls, arguments):
"""
This function ...
:param arguments:
:return:
"""
# Create a new MapMaker instance
maker = cls(arguments.config)
# Set the input and output path
maker.config.path = arguments.path
# A single map name can be specified so the procedure is only run for that map
maker.config.single_map = arguments.map
# Return the new instance
return maker
# -----------------------------------------------------------------
def run(self):
"""
This function ...
:return:
"""
# 1. Call the setup function
self.setup()
# 2. Load the input images
self.load_images()
# Calculate the signal-to-noise
self.create_cutoff_masks()
self.write_cutoff_masks()
# 3. Cut-off the low signal-to-noise pixels
#self.cutoff_low_snr()
# 4. If requested, save the maps with the low signal-to-noise pixels masked out
#if self.config.save_cutoff_maps: self.save_cutoff_maps()
# 5. Convert maps to solar luminosity units
self.convert_units()
# Make the dust map
self.make_dust_map()
# Make the old stars map
self.make_old_stars_map()
# Make the young (non-ionizing) stars map
self.make_young_stars_map()
# Make the ionizing young stars map
self.make_ionizing_stars_map()
# Writing
self.write()
# -----------------------------------------------------------------
def setup(self):
"""
This function ...
:return:
"""
# Call the setup function of the base class
super(MapMaker, self).setup()
# Determine the path to the parameters file
path = fs.join(self.components_path, "parameters.dat")
# Load the structural parameters
self.parameters = load_parameters(path)
# Get the galaxy distance
self.distance_mpc = self.parameters.distance.to("Mpc").value
# -----------------------------------------------------------------
def load_images(self):
"""
This function ...
:return:
"""
# Load the GALEX FUV image
self.load_image("GALEX FUV", "FUV")
# Load the SDSS i image
self.load_image("SDSS i", "i")
# Load the 2MASS H image
self.load_image("2MASS H", "H")
# Load the H-alpha image
self.load_image("Mosaic Halpha", "Halpha")
# Load the 24 micron image
self.load_image("MIPS 24mu", "24mu")
# Load the IRAC 3.6 image
self.load_image("IRAC I1", "3.6mu")
# Load the PACS 70 image
self.load_image("Pacs blue", "70mu")
# load the PACS 160 image
self.load_image("Pacs red", "160mu")
# Load the disk image
self.load_disk()
# Load the bulge image
self.load_bulge()
# -----------------------------------------------------------------
def load_disk(self):
"""
This function ...
"""
# Inform the user
log.info("Loading the disk image ...")
# Determine the path to the disk image
path = fs.join(self.truncation_path, "disk.fits")
# Load the disk image
self.disk = Frame.from_file(path)
# -----------------------------------------------------------------
def load_bulge(self):
"""
This function ...
"""
# Inform the user
log.info("Loading the bulge image ...")
# Determine the path to the bulge image
path = fs.join(self.truncation_path, "bulge.fits")
# Load the bulge image
self.bulge = Frame.from_file(path)
# -----------------------------------------------------------------
def load_image(self, image_name, image_id):
"""
This function ...
:param image_name:
:param image_id:
"""
# Inform the user
log.info("Loading the " + image_name + " image ...")
# Determine the full path to the image
path = fs.join(self.truncation_path, image_name + ".fits")
# Check whether the image is present
if not fs.is_file(path): raise IOError("Could not find the " + image_name + " image")
# Open the image
image = Image.from_file(path)
# Assert that the units are MJy/sr
if not "Halpha" in image_name: assert image.unit == Unit("MJy/sr")
# Add the image to the dictionary
self.images[image_id] = image
# -----------------------------------------------------------------
def create_cutoff_masks(self):
"""
This function ...
:return:
"""
sigma_level = 3.0
# Loop over all images
for name in self.images:
# Calculate the signal-to-noise ratio in each pixel
snr = self.images[name].frames.primary / self.images[name].frames.errors
# Calculate the snr > sigma level mask and add it to the dictionary
self.cutoff_masks[name] = Mask(snr < sigma_level)
# -----------------------------------------------------------------
def write_cutoff_masks(self):
"""
This function ...
:return:
"""
# Loop over all cutoff masks
for name in self.cutoff_masks:
# Get the mask
mask = self.cutoff_masks[name]
# Determine the path to the FITS file
path = fs.join(self.maps_cutoff_path, name + ".fits")
# Save the mask as a FITS file
Frame(mask.astype(float)).save(path)
# -----------------------------------------------------------------
def cutoff_low_snr(self):
"""
This function ...
:return:
"""
# Check whether the reference image is present
if os.path.isfile(self.config.cutoff.reference_path):
# Open the reference image
reference = Image.from_file(self.config.cutoff.reference_path)
# Check whether the errors frame is present
assert self.config.errors in reference.frames, "An error map could not be found for the reference image"
# Create a mask for the pixels with a signal-to-noise ratio of 5 or less
data = reference.frames[self.config.primary] < self.config.cutoff.level*reference.frames[self.config.errors]
self.mask = Mask(data)
# If requested, remove holes from the cut-off mask
if self.config.cutoff.remove_holes:
# Save the mask as a FITS file
Frame(self.mask.astype(float)).save(self.config.saving.cutoff_mask_with_holes_path)
# Perform the segmentation
segments = detect_sources(self.mask.astype(float), 0.5, 1).data
# Save segments
Frame(segments.astype(float)).save(self.config.saving.cutoff_mask_segments_path)
# Find the label of the largest segment (=the background)
label_counts = np.bincount(segments.flatten())
background_label = np.argmax(label_counts)
# Create a mask for the holes identified as background
holes = copy.deepcopy(self.mask)
holes[segments == background_label] = False
# Save holes mask
Frame(holes.astype(float)).save(self.config.saving.cutoff_mask_holes_path)
# Remove holes from the mask
self.mask[holes] = False
# Save the mask as a FITS file
Frame(self.mask.astype(float)).save(self.config.saving.cutoff_mask_path)
# If not, raise an error
else: raise IOError("The prepared reference image could not be found")
# Cut-off the input images at the same contour level
for name in self.images: self.images[name].apply_mask(self.mask, 0.0)
# Cut-off the bulge and disk map at the same contour level
self.disk[self.mask] = 0.0
self.bulge[self.mask] = 0.0
# -----------------------------------------------------------------
def save_cutoff_maps(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Saving input images set to zero outside the low signal-to-noise contour level")
# Save each of the frames
self.h.save(self.config.saving.h_cutoff_path)
self.fuv.save(self.config.saving.fuv_cutoff_path)
self.ha.save(self.config.saving.ha_cutoff_path)
self.irac.save(self.config.saving.irac_cutoff_path)
self.mips.save(self.config.saving.mips_cutoff_path)
self.pacsblue.save(self.config.saving.pacsblue_cutoff_path)
self.pacsred.save(self.config.saving.pacsred_cutoff_path)
self.disk.save(self.config.saving.disk_cutoff_path)
self.bulge.save(self.config.saving.bulge_cutoff_path)
# -----------------------------------------------------------------
def convert_units(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Converting the H-alpha, 24mu, 70mu and 160mu images to solar luminosities ...")
# FUV is not converted to Lsun
# Convert the H-alpha image to solar luminosities
self.convert_halpha_to_solar()
# Convert the MIPS image to solar luminosities
self.convert_mips_to_solar()
# Convert the PACS images to solar luminosities
self.convert_pacs_to_solar()
# Save the images that are converted to solar units
halpha_path = fs.join(self.maps_solar_path, self.images["Halpha"].name + ".fits")
self.images["Halpha"].save(halpha_path)
mips_path = fs.join(self.maps_solar_path, self.images["24mu"].name + ".fits")
self.images["24mu"].save(mips_path)
pacs70_path = fs.join(self.maps_solar_path, self.images["70mu"].name + ".fits")
self.images["70mu"].save(pacs70_path)
pacs160_path = fs.join(self.maps_solar_path, self.images["160mu"].name + ".fits")
self.images["160mu"].save(pacs160_path)
# -----------------------------------------------------------------
def convert_halpha_to_solar(self):
"""
This function converts the H-alpha image from erg/s to solar luminosities.
:return:
"""
# Debugging
log.debug("Converting the H-alpha image to solar luminosities ...")
# Convert from erg/s to Lsun
self.images["Halpha"].convert_to("Lsun")
# -----------------------------------------------------------------
def convert_mips_to_solar(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Converting the 24 micron image to solar luminosities ...")
# Calculate conversion factors from MJy/sr to solar luminosities
exponent = (2.0*np.log10(2.85/206264.806247)) - 20.0 + np.log10(3e8/24e-6) + np.log10(4.0*np.pi) + (2.0*np.log10(self.distance_mpc*3.08567758e22)) - np.log10(3.846e26)
# Multiply the image
self.images["24mu"] *= 10.**exponent
# Set the new unit
self.images["24mu"].unit = "Lsun"
# -----------------------------------------------------------------
def convert_pacs_to_solar(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Converting the 70 and 160 micron images to solar luminosities ...")
# Calculate the conversion factors
exponent_70 = (2.0*np.log10(2.85/206264.806247)) - 20.0 + np.log10(3e8/70e-6) + np.log10(4.0*np.pi) + (2.0*np.log10(self.distance_mpc*3.08567758e22)) - np.log10(3.846e26)
exponent_160 = (2.0*np.log10(2.85/206264.806247)) - 20.0 + np.log10(3e8/160e-6) + np.log10(4.0*np.pi) + (2.0*np.log10(self.distance_mpc*3.08567758e22)) - np.log10(3.846e26)
from __future__ import division
'''
NeuroLearn Statistics Tools
===========================
Tools to help with statistical analyses.
'''
__all__ = ['pearson',
'zscore',
'fdr',
'holm_bonf',
'threshold',
'multi_threshold',
'winsorize',
'trim',
'calc_bpm',
'downsample',
'upsample',
'fisher_r_to_z',
'one_sample_permutation',
'two_sample_permutation',
'correlation_permutation',
'matrix_permutation',
'jackknife_permutation',
'make_cosine_basis',
'summarize_bootstrap',
'regress',
'procrustes',
'procrustes_distance',
'align',
'find_spikes',
'correlation',
'distance_correlation',
'transform_pairwise',
'double_center',
'u_center',]
import numpy as np
import pandas as pd
from scipy.stats import pearsonr, spearmanr, kendalltau, norm, ttest_1samp
from scipy.stats import t as t_dist
from scipy.spatial.distance import squareform, pdist
from copy import deepcopy
import nibabel as nib
from scipy.interpolate import interp1d
import warnings
import itertools
from joblib import Parallel, delayed
import six
from .utils import attempt_to_import, check_square_numpy_matrix
from .external.srm import SRM, DetSRM
from scipy.linalg import orthogonal_procrustes
from scipy.spatial import procrustes as procrust
from scipy.ndimage import label, generate_binary_structure
from sklearn.utils import check_random_state
from sklearn.metrics import pairwise_distances
MAX_INT = np.iinfo(np.int32).max
# Optional dependencies
sm = attempt_to_import('statsmodels.tsa.arima_model', name='sm')
def pearson(x, y):
""" Correlates row vector x with each row vector in 2D array y.
From neurosynth.stats.py - author: <NAME>
"""
data = np.vstack((x, y))
ms = data.mean(axis=1)[:, None]
datam = data - ms
datass = np.sqrt(np.sum(datam*datam, axis=1))
# datass = np.sqrt(ss(datam, axis=1))
temp = np.dot(datam[1:], datam[0].T)
rs = temp / (datass[1:] * datass[0])
return rs
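# Minimal usage sketch for pearson() above (illustrative values, not part of the
# public API): correlate one row vector against each row of a 2D array.
def _example_pearson():
    x = np.array([1., 2., 3., 4.])
    y = np.array([[2., 4., 6., 8.],    # perfectly correlated with x
                  [4., 3., 2., 1.]])   # perfectly anti-correlated with x
    return pearson(x, y)  # approximately [1., -1.]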
def zscore(df):
""" zscore every column in a pandas dataframe or series.
Args:
df: (pd.DataFrame) Pandas DataFrame instance
Returns:
z_data: (pd.DataFrame) z-scored pandas DataFrame or series instance
"""
if isinstance(df, pd.DataFrame):
return df.apply(lambda x: (x - x.mean())/x.std())
elif isinstance(df, pd.Series):
return (df-np.mean(df))/np.std(df)
else:
raise ValueError("Data is not a Pandas DataFrame or Series instance")
def fdr(p, q=.05):
""" Determine FDR threshold given a p value array and desired false
discovery rate q. Written by <NAME>
Args:
p: (np.array) vector of p-values (only considers non-zero p-values)
q: (float) false discovery rate level
Returns:
fdr_p: (float) p-value threshold based on independence or positive
dependence
"""
if not isinstance(p, np.ndarray):
raise ValueError('Make sure vector of p-values is a numpy array')
s = np.sort(p)
nvox = p.shape[0]
null = np.array(range(1, nvox + 1), dtype='float') * q / nvox
below = np.where(s <= null)[0]
fdr_p = s[max(below)] if len(below) else -1
return fdr_p
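# Minimal usage sketch for fdr() above with a small, made-up vector of p-values:
# returns the largest p-value lying under the Benjamini-Hochberg line, or -1 if
# nothing survives.
def _example_fdr():
    p = np.array([0.001, 0.008, 0.039, 0.041, 0.042,
                  0.060, 0.074, 0.205, 0.212, 0.216])
    return fdr(p, q=0.05)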
def holm_bonf(p, alpha=.05):
""" Compute corrected p-values based on the Holm-Bonferroni method, i.e. a step-down procedure applying iteratively less correction to the highest p-values. A bit more conservative than fdr, but much more powerful than vanilla Bonferroni.
Args:
p: (np.array) vector of p-values
alpha: (float) alpha level
Returns:
bonf_p: (float) p-value threshold based on bonferroni
step-down procedure
"""
if not isinstance(p, np.ndarray):
raise ValueError('Make sure vector of p-values is a numpy array')
s = np.sort(p)
nvox = p.shape[0]
null = alpha / (nvox - np.arange(1, nvox + 1) + 1)
below = np.where(s <= null)[0]
bonf_p = s[max(below)] if len(below) else -1
return bonf_p
def threshold(stat, p, thr=.05, return_mask=False):
""" Threshold test image by p-value from p image
Args:
stat: (Brain_Data) Brain_Data instance of arbitrary statistic metric
(e.g., beta, t, etc)
p: (Brain_Data) Brain_data instance of p-values
threshold: (float) p-value to threshold stat image
return_mask: (bool) optionally return the thresholding mask; default False
Returns:
out: Thresholded Brain_Data instance
"""
from nltools.data import Brain_Data
if not isinstance(stat, Brain_Data):
raise ValueError('Make sure stat is a Brain_Data instance')
if not isinstance(p, Brain_Data):
raise ValueError('Make sure p is a Brain_Data instance')
# Create Mask
mask = deepcopy(p)
if thr > 0:
mask.data = (mask.data < thr).astype(int)
else:
mask.data = np.zeros(len(mask.data), dtype=int)
# Apply Threshold Mask
out = deepcopy(stat)
if np.sum(mask.data) > 0:
out = out.apply_mask(mask)
out.data = out.data.squeeze()
else:
out.data = np.zeros(len(mask.data), dtype=int)
if return_mask:
return out, mask
else:
return out
def multi_threshold(t_map, p_map, thresh):
""" Threshold test image by multiple p-value from p image
Args:
stat: (Brain_Data) Brain_Data instance of arbitrary statistic metric
(e.g., beta, t, etc)
p: (Brain_Data) Brain_data instance of p-values
threshold: (list) list of p-values to threshold stat image
Returns:
out: Thresholded Brain_Data instance
"""
from nltools.data import Brain_Data
if not isinstance(t_map, Brain_Data):
raise ValueError('Make sure stat is a Brain_Data instance')
if not isinstance(p_map, Brain_Data):
raise ValueError('Make sure p is a Brain_Data instance')
if not isinstance(thresh, list):
raise ValueError('Make sure thresh is a list of p-values')
affine = t_map.to_nifti().get_affine()
pos_out = np.zeros(t_map.to_nifti().shape)
neg_out = deepcopy(pos_out)
for thr in thresh:
t = threshold(t_map, p_map, thr=thr)
t_pos = deepcopy(t)
t_pos.data = np.zeros(len(t_pos.data))
t_neg = deepcopy(t_pos)
t_pos.data[t.data > 0] = 1
t_neg.data[t.data < 0] = 1
pos_out = pos_out+t_pos.to_nifti().get_data()
neg_out = neg_out+t_neg.to_nifti().get_data()
pos_out = pos_out + neg_out*-1
return Brain_Data(nib.Nifti1Image(pos_out, affine))
def winsorize(data, cutoff=None, replace_with_cutoff=True):
''' Winsorize a Pandas DataFrame or Series with the largest/lowest value not considered outlier
Args:
data: (pd.DataFrame, pd.Series) data to winsorize
cutoff: (dict) a dictionary with keys {'std':[low,high]} or
{'quantile':[low,high]}
replace_with_cutoff: (bool) If True, replace outliers with cutoff.
If False, replaces outliers with closest
existing values; (default: False)
Returns:
out: (pd.DataFrame, pd.Series) winsorized data
'''
return _transform_outliers(data, cutoff, replace_with_cutoff=replace_with_cutoff, method='winsorize')
def trim(data, cutoff=None):
''' Trim a Pandas DataFrame or Series by replacing outlier values with NaNs
Args:
data: (pd.DataFrame, pd.Series) data to trim
cutoff: (dict) a dictionary with keys {'std':[low,high]} or
{'quantile':[low,high]}
Returns:
out: (pd.DataFrame, pd.Series) trimmed data
'''
return _transform_outliers(data, cutoff, replace_with_cutoff=None, method='trim')
def _transform_outliers(data, cutoff, replace_with_cutoff, method):
''' This function is not exposed to user but is called by either trim
or winsorize.
Args:
data: (pd.DataFrame, pd.Series) data to transform
cutoff: (dict) a dictionary with keys {'std':[low,high]} or
{'quantile':[low,high]}
replace_with_cutoff: (bool) If True, replace outliers with cutoff.
If False, replaces outliers with closest
existing values. (default: False)
method: 'winsorize' or 'trim'
Returns:
out: (pd.DataFrame, pd.Series) transformed data
'''
df = data.copy() # To not overwrite data make a copy
def _transform_outliers_sub(data, cutoff, replace_with_cutoff, method='trim'):
if not isinstance(data, pd.Series):
raise ValueError('Make sure that you are applying winsorize to a pandas dataframe or series.')
if isinstance(cutoff, dict):
# calculate cutoff values
if 'quantile' in cutoff:
q = data.quantile(cutoff['quantile'])
elif 'std' in cutoff:
std = [data.mean()-data.std()*cutoff['std'][0], data.mean()+data.std()*cutoff['std'][1]]
q = pd.Series(index=cutoff['std'], data=std)
# if replace_with_cutoff is false, replace with true existing values closest to cutoff
if method == 'winsorize':
if not replace_with_cutoff:
q.iloc[0] = data[data > q.iloc[0]].min()
q.iloc[1] = data[data < q.iloc[1]].max()
else:
raise ValueError('cutoff must be a dictionary with quantile or std keys.')
if method == 'winsorize':
if isinstance(q, pd.Series) and len(q) == 2:
data[data < q.iloc[0]] = q.iloc[0]
data[data > q.iloc[1]] = q.iloc[1]
elif method == 'trim':
data[data < q.iloc[0]] = np.nan
data[data > q.iloc[1]] = np.nan
return data
# transform each column if a dataframe, if series just transform data
if isinstance(df, pd.DataFrame):
for col in df.columns:
df.loc[:, col] = _transform_outliers_sub(df.loc[:, col], cutoff=cutoff, replace_with_cutoff=replace_with_cutoff, method=method)
return df
elif isinstance(df, pd.Series):
return _transform_outliers_sub(df, cutoff=cutoff, replace_with_cutoff=replace_with_cutoff, method=method)
else:
raise ValueError('Data must be a pandas DataFrame or Series')
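# Minimal usage sketch for winsorize() above: cap a made-up series at its
# 5th/95th percentiles (the cutoff values are illustrative only).
def _example_winsorize():
    s = pd.Series([1., 2., 2., 3., 3., 3., 4., 4., 5., 100.])
    return winsorize(s, cutoff={'quantile': [0.05, 0.95]}, replace_with_cutoff=True)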
def calc_bpm(beat_interval, sampling_freq):
''' Calculate instantaneous BPM from beat to beat interval
Args:
beat_interval: (int) number of samples in between each beat
(typically R-R Interval)
sampling_freq: (float) sampling frequency in Hz
Returns:
bpm: (float) beats per minute for time interval
'''
return 60*sampling_freq*(1/(beat_interval))
def downsample(data, sampling_freq=None, target=None, target_type='samples',
method='mean'):
''' Downsample pandas to a new target frequency or number of samples
using averaging.
Args:
data: (pd.DataFrame, pd.Series) data to downsample
sampling_freq: (float) Sampling frequency of data in hertz
target: (float) downsampling target
target_type: type of target can be [samples,seconds,hz]
method: (str) type of downsample method ['mean','median'],
default: mean
Returns:
out: (pd.DataFrame, pd.Series) downsampled data
'''
if not isinstance(data, (pd.DataFrame, pd.Series)):
raise ValueError('Data must be a pandas DataFrame or Series instance.')
if not (method == 'median') | (method == 'mean'):
raise ValueError("Metric must be either 'mean' or 'median' ")
if target_type == 'samples':
n_samples = target
elif target_type == 'seconds':
n_samples = target*sampling_freq
elif target_type == 'hz':
n_samples = sampling_freq/target
else:
raise ValueError('Make sure target_type is "samples", "seconds", '
' or "hz".')
idx = np.sort(np.repeat(np.arange(1, data.shape[0]/n_samples, 1), n_samples))
# if data.shape[0] % n_samples:
if data.shape[0] > len(idx):
idx = np.concatenate([idx, np.repeat(idx[-1]+1, data.shape[0]-len(idx))])
if method == 'mean':
return data.groupby(idx).mean().reset_index(drop=True)
elif method == 'median':
return data.groupby(idx).median().reset_index(drop=True)
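# Minimal usage sketch for downsample() above: average a made-up 100 Hz series
# down to 10 Hz (signal and rates are illustrative only).
def _example_downsample():
    ts = pd.Series(np.arange(1000, dtype=float))
    return downsample(ts, sampling_freq=100, target=10, target_type='hz', method='mean')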
def upsample(data, sampling_freq=None, target=None, target_type='samples', method='linear'):
''' Upsample pandas to a new target frequency or number of samples using interpolation.
Args:
data: (pd.DataFrame, pd.Series) data to upsample
(Note: will drop non-numeric columns from DataFrame)
sampling_freq: Sampling frequency of data in hertz
target: (float) upsampling target
target_type: (str) type of target can be [samples,seconds,hz]
method: (str) ['linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic']
where 'zero', 'slinear', 'quadratic' and 'cubic'
refer to a spline interpolation of zeroth, first,
second or third order (default: linear)
Returns:
upsampled pandas object
'''
methods = ['linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic']
if method not in methods:
raise ValueError("Method must be 'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'")
if target_type == 'samples':
n_samples = target
elif target_type == 'seconds':
n_samples = target*sampling_freq
elif target_type == 'hz':
n_samples = float(sampling_freq)/float(target)
else:
raise ValueError('Make sure target_type is "samples", "seconds", or "hz".')
orig_spacing = np.arange(0, data.shape[0], 1)
new_spacing = np.arange(0, data.shape[0]-1, n_samples)
if isinstance(data, pd.Series):
interpolate = interp1d(orig_spacing, data, kind=method)
return interpolate(new_spacing)
elif isinstance(data, pd.DataFrame):
numeric_data = data._get_numeric_data()
if data.shape[1] != numeric_data.shape[1]:
warnings.warn('Dropping %s non-numeric columns' % (data.shape[1] - numeric_data.shape[1]), UserWarning)
out = pd.DataFrame(columns=numeric_data.columns, index=None)
for i, x in numeric_data.iteritems():
interpolate = interp1d(orig_spacing, x, kind=method)
out.loc[:, i] = interpolate(new_spacing)
return out
else:
raise ValueError('Data must be a pandas DataFrame or Series instance.')
def fisher_r_to_z(r):
''' Use Fisher transformation to convert correlation to z score '''
return .5*np.log((1+r)/(1-r))
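# Worked example for fisher_r_to_z() above: z = .5 * ln((1 + r) / (1 - r)),
# so r = 0.5 maps to .5 * ln(3) ~= 0.549 (the value is chosen for illustration).
def _example_fisher_r_to_z():
    return fisher_r_to_z(0.5)  # ~0.5493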
def correlation(data1, data2, metric='pearson'):
''' This function calculates the correlation between data1 and data2
Args:
data1: (np.array) x
data2: (np.array) y
metric: (str) type of correlation ["spearman" or "pearson" or "kendall"]
Returns:
r: (np.array) correlations
p: (float) p-value
'''
if metric == 'spearman':
func = spearmanr
elif metric == 'pearson':
func = pearsonr
elif metric == 'kendall':
func = kendalltau
else:
raise ValueError('metric must be "spearman" or "pearson" or "kendall"')
return func(data1, data2)
def _permute_sign(data, random_state=None):
random_state = check_random_state(random_state)
return np.mean(data*random_state.choice([1, -1], len(data)))
def _permute_group(data, random_state=None):
random_state = check_random_state(random_state)
perm_label = random_state.permutation(data['Group'])
return (np.mean(data.loc[perm_label == 1, 'Values']) - np.mean(data.loc[perm_label == 0, 'Values']))
def _permute_func(data1, data2, metric, random_state=None):
""" Helper function for matrix_permutation.
Takes a function that is repeated for each permutation.
Args:
data1: (np.array) squareform matrix
data2: flattened np array (same size upper triangle of data1)
metric: similarity/distance function from scipy.stats (e.g., spearman, pearson, kendall etc)
random_state: random_state instance for permutation
Returns:
r: r value of function
"""
random_state = check_random_state(random_state)
data_row_id = range(data1.shape[0])
permuted_ix = random_state.choice(data_row_id,
size=len(data_row_id), replace=False)
new_fmri_dist = data1.iloc[permuted_ix, permuted_ix].values
new_fmri_dist = new_fmri_dist[np.triu_indices(new_fmri_dist.shape[0], k=1)]
return correlation(new_fmri_dist, data2, metric=metric)[0]
def _calc_pvalue(all_p, stat, tail):
"""Calculates p value based on distribution of correlations
This function is called by the permutation functions
all_p: list of correlation values from permutation
stat: actual value being tested, i.e., stats['correlation'] or stats['mean']
tail: (int) either 2 or 1 for two-tailed p-value or one-tailed
"""
denom = float(len(all_p)) + 1
if tail == 2:
numer = np.sum(np.abs(all_p) >= np.abs(stat)) + 1
elif tail == 1:
if stat >= 0:
numer = np.sum(all_p >= stat) + 1
else:
numer = np.sum(all_p <= stat) + 1
else:
raise ValueError('tail must be either 1 or 2')
p = numer / denom
return p
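# Worked example for _calc_pvalue() above with a made-up permutation distribution:
# for a two-tailed test, p = (#{|perm| >= |stat|} + 1) / (n_permutations + 1).
def _example_calc_pvalue():
    all_p = [-0.6, -0.4, -0.2, -0.1, 0.0, 0.1, 0.3, 0.55, 0.7]
    return _calc_pvalue(all_p, stat=0.5, tail=2)  # (3 + 1) / (9 + 1) = 0.4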
def one_sample_permutation(data, n_permute=5000, tail=2, n_jobs=-1, return_perms=False, random_state=None):
''' One sample permutation test using randomization.
Args:
data: (pd.DataFrame, pd.Series, np.array) data to permute
n_permute: (int) number of permutations
tail: (int) either 1 for one-tail or 2 for two-tailed test (default: 2)
n_jobs: (int) The number of CPUs to use to do the computation.
-1 means all CPUs.
return_perms: (bool) Return the permutation distribution along with the p-value; default False
Returns:
stats: (dict) dictionary of permutation results ['mean','p']
'''
random_state = check_random_state(random_state)
seeds = random_state.randint(MAX_INT, size=n_permute)
data = np.array(data)
stats = dict()
stats['mean'] = np.mean(data)
all_p = Parallel(n_jobs=n_jobs)(delayed(_permute_sign)(data,
random_state=seeds[i]) for i in range(n_permute))
stats['p'] = _calc_pvalue(all_p, stats['mean'], tail)
if return_perms:
stats['perm_dist'] = all_p
return stats
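# Minimal usage sketch for one_sample_permutation() above: test whether the mean
# of a made-up sample differs from zero; n_permute and n_jobs are kept small here
# purely to keep the sketch cheap.
def _example_one_sample_permutation():
    rng = np.random.RandomState(0)
    data = rng.randn(20) + 0.5
    return one_sample_permutation(data, n_permute=500, tail=2, n_jobs=1, random_state=0)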
def two_sample_permutation(data1, data2, n_permute=5000,
tail=2, n_jobs=-1, return_perms=False, random_state=None):
''' Independent sample permutation test.
Args:
data1: (pd.DataFrame, pd.Series, np.array) dataset 1 to permute
data2: (pd.DataFrame, pd.Series, np.array) dataset 2 to permute
n_permute: (int) number of permutations
tail: (int) either 1 for one-tail or 2 for two-tailed test (default: 2)
n_jobs: (int) The number of CPUs to use to do the computation.
-1 means all CPUs.
return_perms: (bool) Return the permutation distribution along with the p-value; default False
Returns:
stats: (dict) dictionary of permutation results ['mean','p']
'''
random_state = check_random_state(random_state)
seeds = random_state.randint(MAX_INT, size=n_permute)
stats = dict()
stats['mean'] = np.mean(data1)-np.mean(data2)
data = pd.DataFrame(data={'Values': data1, 'Group': np.ones(len(data1))})
data = data.append(pd.DataFrame(data={
'Values': data2,
'Group': np.zeros(len(data2))}))
all_p = Parallel(n_jobs=n_jobs)(delayed(_permute_group)(data,
random_state=seeds[i]) for i in range(n_permute))
stats['p'] = _calc_pvalue(all_p, stats['mean'], tail)
if return_perms:
stats['perm_dist'] = all_p
return stats
def correlation_permutation(data1, data2, n_permute=5000, metric='spearman',
tail=2, n_jobs=-1, return_perms=False, random_state=None):
''' Permute correlation.
Args:
data1: (pd.DataFrame, pd.Series, np.array) dataset 1 to permute
data2: (pd.DataFrame, pd.Series, np.array) dataset 2 to permute
n_permute: (int) number of permutations
metric: (str) type of association metric ['spearman','pearson',
'kendall']
tail: (int) either 1 for one-tail or 2 for two-tailed test (default: 2)
n_jobs: (int) The number of CPUs to use to do the computation.
-1 means all CPUs.
return_perms: (bool) Return the permutation distribution along with the p-value; default False
Returns:
stats: (dict) dictionary of permutation results ['correlation','p']
'''
random_state = check_random_state(random_state)
stats = dict()
data1 = np.array(data1)
data2 = np.array(data2)
stats['correlation'] = correlation(data1, data2, metric=metric)[0]
all_p = Parallel(n_jobs=n_jobs)(delayed(correlation)(
random_state.permutation(data1), data2, metric=metric)
for i in range(n_permute))
all_p = [x[0] for x in all_p]
stats['p'] = _calc_pvalue(all_p, stats['correlation'], tail)
if return_perms:
stats['perm_dist'] = all_p
return stats
def matrix_permutation(data1, data2, n_permute=5000, metric='spearman',
tail=2, n_jobs=-1, return_perms=False, random_state=None):
""" Permute 2-dimensional matrix correlation (mantel test).
Chen, G. et al. (2016). Untangling the relatedness among correlations,
part I: nonparametric approaches to inter-subject correlation analysis
at the group level. Neuroimage, 142, 248-259.
Args:
data1: (pd.DataFrame, np.array) square matrix
data2: (pd.DataFrame, np.array) square matrix
n_permute: (int) number of permutations
metric: (str) type of association metric ['spearman','pearson',
'kendall']
tail: (int) either 1 for one-tail or 2 for two-tailed test
(default: 2)
n_jobs: (int) The number of CPUs to use to do the computation.
-1 means all CPUs.
return_perms: (bool) Return the permutation distribution along with the p-value; default False
Returns:
stats: (dict) dictionary of permutation results ['correlation','p']
"""
random_state = check_random_state(random_state)
seeds = random_state.randint(MAX_INT, size=n_permute)
sq_data1 = check_square_numpy_matrix(data1)
sq_data2 = check_square_numpy_matrix(data2)
data1 = sq_data1[np.triu_indices(sq_data1.shape[0], k=1)]
data2 = sq_data2[np.triu_indices(sq_data2.shape[0], k=1)]
stats = dict()
stats['correlation'] = correlation(data1, data2, metric=metric)[0]
all_p = Parallel(n_jobs=n_jobs)(delayed(_permute_func)(
pd.DataFrame(sq_data1), data2, metric=metric, random_state=seeds[i])
for i in range(n_permute))
stats['p'] = _calc_pvalue(all_p, stats['correlation'], tail)
if return_perms:
stats['perm_dist'] = all_p
return stats
def jackknife_permutation(data1, data2, metric='spearman',
p_value='permutation', n_jobs=-1, n_permute=5000,
tail=2, random_state=None):
''' This function uses a randomization test on a jackknife of absolute
distance/similarity of each subject
Args:
data1: (Adjacency, pd.DataFrame, np.array) square matrix
data2: (Adjacency, pd.DataFrame, np.array) square matrix
metric: (str) type of association metric ['spearman','pearson',
'kendall']
tail: (int) either 1 for one-tail or 2 for two-tailed test (default: 2)
p_value: ['ttest', 'permutation']
n_permute: (int) number of permutations
n_jobs: (int) The number of CPUs to use to do the computation.
-1 means all CPUs.
Returns:
stats: (dict) dictionary of permutation results ['correlation','p']
'''
random_state = check_random_state(random_state)
data1 = check_square_numpy_matrix(data1)
data2 = check_square_numpy_matrix(data2)
stats = {}
stats['all_r'] = []
for s in range(data1.shape[0]):
stats['all_r'].append(correlation(np.delete(data1[s, ], s),
np.delete(data2[s, ], s),
metric=metric)[0])
stats['correlation'] = np.mean(stats['all_r'])
if p_value == 'permutation':
stats_permute = one_sample_permutation(stats['all_r'],
n_permute=n_permute, tail=tail,
n_jobs=n_jobs,
random_state=random_state)
stats['p'] = stats_permute['p']
elif p_value == 'ttest':
stats['p'] = ttest_1samp(stats['all_r'], 0)[1]
else:
raise NotImplementedError("Only ['ttest', 'permutation'] are currently implemented.")
return stats
def make_cosine_basis(nsamples, sampling_freq, filter_length, unit_scale=True, drop=0):
""" Create a series of cosine basis functions for a discrete cosine
transform. Based off of implementation in spm_filter and spm_dctmtx
because scipy dct can only apply transforms but not return the basis
functions. Like SPM, does not add constant (i.e. intercept), but does
retain first basis (i.e. sigmoidal/linear drift)
Args:
nsamples (int): number of observations (e.g. TRs)
sampling_freq (float): sampling frequency in hertz (i.e. 1 / TR)
filter_length (int): length of filter in seconds
unit_scale (bool): ensure that the basis functions are on the normalized range [-1, 1]; default True
drop (int): index of which early/slow bases to drop if any; default is
to drop constant (i.e. intercept) like SPM. Unlike SPM, retains
first basis (i.e. linear/sigmoidal). Will cumulatively drop bases
up to and inclusive of index provided (e.g. 2, drops bases 1 and 2)
Returns:
out (ndarray): nsamples x number of basis sets numpy array
"""
# Figure out number of basis functions to create
order = int(np.fix(2 * (nsamples * sampling_freq)/filter_length + 1))
n = np.arange(nsamples)
# Initialize basis function matrix
C = np.zeros((len(n), order))
# Add constant
C[:, 0] = np.ones((1, len(n)))/np.sqrt(nsamples)
# Insert higher order cosine basis functions
for i in range(1, order):
C[:, i] = np.sqrt(2./nsamples) * np.cos(np.pi*(2*n+1) * i/(2*nsamples))
# Drop intercept ala SPM
C = C[:, 1:]
if C.size == 0:
raise ValueError('Basis function creation failed! nsamples is too small for requested filter_length.')
if unit_scale:
C *= 1. / C[0, 0]
C = C[:, drop:]
return C
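# Minimal usage sketch for make_cosine_basis() above: a DCT basis for 400 TRs
# sampled at TR = 2 s with a 128 s high-pass filter length (numbers are
# illustrative, in the spirit of the SPM defaults).
def _example_make_cosine_basis():
    basis = make_cosine_basis(nsamples=400, sampling_freq=1. / 2, filter_length=128)
    return basis.shape  # (400, number of retained basis functions)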
def transform_pairwise(X, y):
'''Transforms data into pairs with balanced labels for ranking
Transforms a n-class ranking problem into a two-class classification
problem. Subclasses implementing particular strategies for choosing
pairs should override this method.
In this method, all pairs are chosen, except for those that have the
same target value. The output is an array of balanced classes, i.e.
there are the same number of -1 as +1
Reference: "Large Margin Rank Boundaries for Ordinal Regression",
<NAME>, <NAME>, <NAME>.
Authors: <NAME> <<EMAIL>>
<NAME> <<EMAIL>>
Args:
X: (np.array), shape (n_samples, n_features)
The data
y: (np.array), shape (n_samples,) or (n_samples, 2)
Target labels. If it's a 2D array, the second column represents
the grouping of samples, i.e., samples with different groups will
not be considered.
Returns:
X_trans: (np.array), shape (k, n_features)
Data as pairs, where k = n_samples * (n_samples-1) / 2 if grouping
values were not passed. If grouping variables exist, then returns
values computed for each group.
y_trans: (np.array), shape (k,)
Output class labels, where classes have values {-1, +1}
If y was shape (n_samples, 2), then returns (k, 2) with groups on
the second dimension.
'''
X_new, y_new, y_group = [], [], []
y_ndim = y.ndim
if y.ndim == 1:
y = np.c_[y, np.ones(y.shape[0])]
comb = itertools.combinations(range(X.shape[0]), 2)
for k, (i, j) in enumerate(comb):
if y[i, 0] == y[j, 0] or y[i, 1] != y[j, 1]:
# skip if same target or different group
continue
X_new.append(X[i] - X[j])
y_new.append(np.sign(y[i, 0] - y[j, 0]))
y_group.append(y[i, 1])
# output balanced classes
if y_new[-1] != (-1) ** k:
y_new[-1] = - y_new[-1]
X_new[-1] = - X_new[-1]
if y_ndim == 1:
return np.asarray(X_new), np.asarray(y_new).ravel()
elif y_ndim == 2:
return np.asarray(X_new), np.vstack((np.asarray(y_new), np.asarray(y_group))).T
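# Minimal usage sketch for transform_pairwise() above: a tiny, made-up 3-sample
# ranking problem turned into balanced pairwise differences.
def _example_transform_pairwise():
    X = np.array([[1., 0.], [2., 0.], [3., 0.]])
    y = np.array([1, 2, 3])
    X_pairs, y_pairs = transform_pairwise(X, y)
    return X_pairs.shape, set(np.unique(y_pairs))  # (3, 2) and {-1.0, 1.0}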
def _robust_estimator(vals, X, robust_estimator='hc0', nlags=1):
"""
Computes robust sandwich estimators for standard errors used in OLS computation. Types include:
'hc0': Huber (1980) sandwich estimator to return robust standard error estimates.
'hc3': MacKinnon and White (1985) HC3 sandwich estimator. Provides more robustness in smaller samples than HC0 (Long & Ervin, 2000)
'hac': Newey-West (1987) estimator for robustness to heteroscedasticity as well as serial auto-correlation at given lags.
Refs: https://www.wikiwand.com/en/Heteroscedasticity-consistent_standard_errors
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py
https://cran.r-project.org/web/packages/sandwich/vignettes/sandwich.pdf
https://www.stata.com/manuals13/tsnewey.pdf
Args:
vals (np.ndarray): 1d array of residuals
X (np.ndarray): design matrix used in OLS, e.g. Brain_Data().X
robust_estimator (str): estimator type, 'hc0' (default), 'hc3', or 'hac'
nlags (int): number of lags, only used with 'hac' estimator, default is 1
Returns:
stderr (np.ndarray): 1d array of standard errors with length == X.shape[1]
"""
if robust_estimator not in ['hc0', 'hc3', 'hac']:
raise ValueError("robust_estimator must be one of hc0, hc3 or hac")
# Make a sandwich!
# First we need bread
bread = np.linalg.pinv(np.dot(X.T, X))
# Then we need meat
if robust_estimator == 'hc0':
V = np.diag(vals**2)
meat = np.dot(np.dot(X.T, V), X)
elif robust_estimator == 'hc3':
V = np.diag(vals**2)/(1-np.diag(np.dot(X, np.dot(bread, X.T))))**2
meat = np.dot(np.dot(X.T, V), X)
elif robust_estimator == 'hac':
weights = 1 - np.arange(nlags+1.)/(nlags+1.)
# First compute lag 0
V = np.diag(vals**2)
meat = weights[0] * np.dot(np.dot(X.T, V), X)
# pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103,R0903,R0904
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from unittest.mock import MagicMock
from astropy.utils import NumpyRNGContext
from glue import core
from glue.utils import broadcast_to
from ..component import Component, DerivedComponent, CategoricalComponent, DateTimeComponent
from ..component_id import ComponentID
from ..component_link import ComponentLink, CoordinateComponentLink, BinaryComponentLink
from ..coordinates import Coordinates, IdentityCoordinates
from ..data import Data, pixel_label, BaseCartesianData
from ..link_helpers import LinkSame
from ..data_collection import DataCollection
from ..exceptions import IncompatibleAttribute
from ..hub import Hub
from ..registry import Registry
from ..subset import (Subset, CategoricalROISubsetState, SubsetState,
RoiSubsetState, RangeSubsetState, SliceSubsetState,
CategoricalMultiRangeSubsetState, MaskSubsetState,
CategoricalROISubsetState2D, AndState, roi_to_subset_state)
from ..roi import PolygonalROI, CategoricalROI, RangeROI, RectangularROI
from .test_state import clone
class _TestCoordinates(Coordinates):
def __init__(self):
super().__init__(pixel_n_dim=2, world_n_dim=2)
def pixel_to_world_values(self, *args):
return [(i + 2.) * a for i, a in enumerate(args)]
def world_to_pixel_values(self, *args):
return [a / (i + 2.) for i, a in enumerate(args)]
class TestData(object):
def setup_method(self, method):
self.data = Data(label="Test Data")
Registry().clear()
comp = Component(np.random.random((2, 3)))
self.comp = comp
self.data.coords = _TestCoordinates()
self.comp_id = self.data.add_component(comp, 'Test Component')
def test_2d_component_print(self):
assert str(self.comp) == 'Component with shape (2, 3)'
def test_shape_empty(self):
d = Data()
assert d.shape == ()
def test_ndim_empty(self):
d = Data()
assert d.ndim == 0
def test_shape(self):
assert self.data.shape == (2, 3)
def test_ndim(self):
assert self.data.ndim == 2
def test_size(self):
assert self.data.size == 6
def test_label(self):
d = Data()
assert d.label == ''
assert self.data.label == "Test Data"
def test_set_label(self):
d = Data()
d.label = 'test_set_label'
assert d.label == 'test_set_label'
def test_add_component_with_id(self):
cid = ComponentID("test")
comp = Component(np.random.random((2, 3)))
cid2 = self.data.add_component(comp, cid)
assert cid2 is cid
def test_add_component_via_setitem(self):
d = Data(x=[1, 2, 3])
d['y'] = d['x'] * 2
np.testing.assert_array_equal(d['y'], [2, 4, 6])
def test_add_component_incompatible_shape(self):
comp = MagicMock()
comp.data.shape = (3, 2)
with pytest.raises(TypeError) as exc:
self.data.add_component(comp("junk label"))
assert exc.value.args[0] == ("add_component() missing 1 required "
"positional argument: 'label'")
def test_get_getitem_incompatible_attribute(self):
cid = ComponentID('bad')
with pytest.raises(IncompatibleAttribute) as exc:
self.data.__getitem__(cid)
assert exc.value.args[0] is cid
def test_get_component_incompatible_attribute(self):
cid = ComponentID('bad')
with pytest.raises(IncompatibleAttribute) as exc:
self.data.get_component(cid)
assert exc.value.args[0] is cid
def test_get_component_name(self):
d = Data(x=[1, 2, 3])
assert isinstance(d.get_component('x'), Component)
def test_component_ids(self):
cid = self.data.component_ids()
assert self.comp_id in cid
def test_new_subset(self):
sub = self.data.new_subset()
assert sub in self.data.subsets
def test_data_not_created_with_subsets(self):
assert len(self.data.subsets) == 0
def test_register(self):
hub = MagicMock(spec_set=Hub)
self.data.register_to_hub(hub)
assert hub is self.data.hub
def test_component_order(self):
"""Components should be returned in the order they were specified"""
data = Data()
comp = Component(np.array([1, 2, 3]))
labels = 'asldfkjaAREGWoibasiwnsldkgajsldkgslkg'
for label in labels:
data.add_component(comp, label)
ids = data.main_components
assert [cid.label for cid in ids] == list(labels)
def test_broadcast(self):
hub = MagicMock(spec_set=Hub)
# make sure broadcasting with no hub is ok
self.data.broadcast('testing')
# make sure broadcast with hub gets relayed
self.data.register_to_hub(hub)
self.data.broadcast('testing')
assert hub.broadcast.call_count == 1
def test_double_hub_add(self):
hub = MagicMock(spec_set=Hub)
hub2 = MagicMock(spec_set=Hub)
self.data.register_to_hub(hub)
with pytest.raises(AttributeError) as exc:
self.data.__setattr__('hub', hub2)
assert exc.value.args[0] == ("Data has already been assigned "
"to a different hub")
def test_main_components(self):
compid = ComponentID('virtual')
link = MagicMock(spec_set=ComponentLink)
comp = DerivedComponent(self.data, link)
self.data.add_component(comp, compid)
main_comps = self.data.main_components
assert self.comp_id in main_comps
assert compid not in main_comps
def test_add_component_invalid_component(self):
comp = Component(np.array([1]))
with pytest.raises(ValueError) as exc:
self.data.add_component(comp, label='bad')
assert exc.value.args[0].startswith("The dimensions of component bad")
def test_add_component_link(self):
link = MagicMock(spec_set=ComponentLink)
cid = ComponentID("new id")
link.get_to_id.return_value = cid
self.data.add_component_link(link)
assert cid in self.data.derived_components
def test_derived_components(self):
compid = ComponentID('virtual')
link = MagicMock(spec_set=ComponentLink)
comp = DerivedComponent(self.data, link)
self.data.add_component(comp, compid)
pricomps = self.data.derived_components
assert self.comp_id not in pricomps
assert compid in pricomps
def test_str_empty(self):
d = Data()
str(d)
def test_str_(self):
str(self.data)
def test_add_derived_component(self):
compid = ComponentID('virtual')
link = MagicMock(spec_set=ComponentLink)
comp = DerivedComponent(self.data, link)
comp.data.shape = self.data.shape
self.data.add_component(comp, compid)
result = self.data[compid]
link.compute.assert_called_with(self.data)
def test_find_component_id(self):
cid = self.data.find_component_id('Test Component')
assert cid == self.comp_id
assert self.data.find_component_id('does not exist') is None
def test_add_subset(self):
s = Subset(Data())
self.data.add_subset(s)
assert s in self.data.subsets
def test_add_subset_with_subset_state(self):
"""Passing a subset state auto-wraps into a subset object"""
state = SubsetState()
self.data.add_subset(state)
added = self.data.subsets[-1]
assert added.subset_state is state
assert added.data is self.data
def test_add_subset_reparents_subset(self):
"""add_subset method updates subset.data reference"""
s = Subset(None)
self.data.add_subset(s)
assert s.data is self.data
def test_add_subset_disambiguates_label(self):
"""adding subset should disambiguate label if needed"""
s1 = Subset(None)
self.data.add_subset(s1)
s1.label = "test_subset_label"
s2 = Subset(None)
s2.label = "test_subset_label"
assert s2.label == "test_subset_label"
self.data.add_subset(s2)
assert s2.label != "test_subset_label"
def test_add_subset_with_hub(self):
s = Subset(None)
hub = MagicMock(spec_set=Hub)
self.data.register_to_hub(hub)
self.data.add_subset(s)
assert s in self.data.subsets
assert hub.broadcast.call_count == 1
def test_remove_component(self):
hub = MagicMock(spec_set=Hub)
self.data.register_to_hub(hub)
self.data.remove_component(self.comp_id)
assert self.comp_id not in self.data.components
assert hub.broadcast.call_count == 2
def test_get_component(self):
assert self.data.get_component(self.comp_id) is self.comp
def test_get_None_component(self):
with pytest.raises(IncompatibleAttribute):
self.data.get_component(None)
def test_get_item(self):
assert self.data[self.comp_id] is self.comp.data
def test_coordinate_links(self):
links = self.data.coordinate_links
w0 = self.data[self.data.world_component_ids[0]]
w1 = self.data[self.data.world_component_ids[1]]
p0 = self.data[self.data.pixel_component_ids[0]]
p1 = self.data[self.data.pixel_component_ids[1]]
w0prime = links[0].compute(self.data)
p0prime = links[1].compute(self.data)
w1prime = links[2].compute(self.data)
p1prime = links[3].compute(self.data)
np.testing.assert_array_equal(w0, w0prime)
np.testing.assert_array_equal(w1, w1prime)
np.testing.assert_array_equal(p0, p0prime)
np.testing.assert_array_equal(p1, p1prime)
def test_coordinate_links_empty_data(self):
d = Data()
d.coords = None
assert d.coordinate_links == []
def test_coordinate_links_idempotent(self):
"""Should only calculate links once, and
return the same objects every time"""
links = self.data.coordinate_links
links2 = self.data.coordinate_links
assert links == links2
def test_fancy_view(self):
result = self.data[self.comp_id, :, 2]
np.testing.assert_array_equal(result, self.data[self.comp_id][:, 2])
def test_get_by_string(self):
result = self.data['Test Component']
assert result is self.comp.data
def test_get_by_missing_string(self):
with pytest.raises(IncompatibleAttribute) as exc:
result = self.data['xyz']
assert exc.value.args[0] == 'xyz'
def test_immutable(self):
d = Data(x=[1, 2, 3])
with pytest.raises(ValueError) as exc:
d['x'][:] = 5
assert 'read-only' in exc.value.args[0]
assert not d['x'].flags['WRITEABLE']
@pytest.mark.xfail
def test_categorical_immutable(self):
d = Data()
c = CategoricalComponent(['M', 'M', 'F'], categories=['M', 'F'])
d.add_component(c, label='gender')
with pytest.raises(ValueError) as exc:
d['gender'][:] = 5
assert 'read-only' in exc.value.args[0]
assert not d['gender'].flags['WRITEABLE']
def test_update_components(self):
d = Data(x=[1, 2, 3], y=[1, 2, 3])
d.update_components({d.id['x']: [10, 20, 30]})
np.testing.assert_array_equal(d['x'],[10, 20, 30])
def test_update_categorical_components(self):
d = Data(x=['M', 'M', 'F'], y=['F', 'F', 'M'])
d.update_components({d.id['x']: ['M', 'F', 'M']})
np.testing.assert_array_equal(d['x'],np.array(['M', 'F', 'M']))
assert isinstance(d.get_component(d.id['x']),CategoricalComponent)
def test_update_nd_categorical_components(self):
d = Data(x=[['M', 'M', 'F'],['F', 'F', 'M']])
d.update_components({d.id['x']: [['M', 'F', 'M'],['F','M','F']]})
np.testing.assert_array_equal(d['x'],[['M', 'F', 'M'],['F','M','F']])
assert isinstance(d.get_component(d.id['x']),CategoricalComponent)
def test_update_clears_subset_cache(self):
from ..roi import RectangularROI
d = Data(x=[1, 2, 3], y=[1, 2, 3])
s = d.new_subset()
state = core.subset.RoiSubsetState()
state.xatt = d.id['x']
state.yatt = d.id['y']
state.roi = RectangularROI(xmin=1.5, xmax=2.5, ymin=1.5, ymax=2.5)
s.subset_state = state
np.testing.assert_array_equal(s.to_mask(), [False, True, False])
d.update_components({d.id['x']: [10, 20, 30]})
np.testing.assert_array_equal(s.to_mask(), [False, False, False])
def test_add_derived_implicit(self):
# Regression test for a bug that caused derived components added via
# the data[...] = ... syntax to have links that did not include a 'to'
# argument, leading the link manager to add a ghost component to the
# data.
from ..data_collection import DataCollection
dc = DataCollection([])
data = Data(x=[1, 2, 3], y=[2, 3, 4], label='data1')
dc.append(data)
data['z'] = data.id['x'] + 1
# There should be four components: x, y, z, and pixel
assert len(data.components) == 4
def test_remove_derived_dependency(self):
# Regression test for a bug that occurred when removing a component
# used in a derived component, which should also remove the derived
# component itself. To make things more fun, we set up a chain of
# derived components to make sure they are all removed.
data = Data(a=[1, 2, 3], b=[2, 3, 4], label='data1')
data['c'] = data.id['a'] + 1
data['d'] = data.id['c'] + 1
data['e'] = data.id['d'] + 1
data['f'] = data.id['e'] + 1
a_id = data.id['a']
b_id = data.id['b']
c_id = data.id['c']
d_id = data.id['d']
e_id = data.id['e']
f_id = data.id['f']
# There should be seven components: pixel, a, b, c, d, e, f
assert len(data.components) == 7
data.remove_component(data.id['d'])
# This should also remove e and f since they depend on d
assert len(data.components) == 4
assert a_id in data.components
assert b_id in data.components
assert c_id in data.components
assert d_id not in data.components
assert e_id not in data.components
assert f_id not in data.components
def test_links_property(self):
data = Data(a=[1, 2, 3], b=[2, 3, 4], label='data1',
coords=IdentityCoordinates(n_dim=1))
assert len(data.links) == 2
assert isinstance(data.links[0], CoordinateComponentLink)
assert isinstance(data.links[1], CoordinateComponentLink)
data['c'] = data.id['a'] + 1
assert len(data.links) == 3
assert isinstance(data.links[2], BinaryComponentLink)
class TestROICreation(object):
def test_range_roi(self):
d = Data(xdata=[1, 2, 3], ydata=[1, 2, 3])
comp = d.get_component(d.id['xdata'])
roi = RangeROI('x', min=2, max=3)
s = roi_to_subset_state(roi, x_att='xdata')
assert isinstance(s, RangeSubsetState)
np.testing.assert_array_equal((s.lo, s.hi),
[2, 3])
roi = RangeROI('y', min=2, max=3)
s = roi_to_subset_state(roi,
x_att='xdata',
y_att='ydata')
assert isinstance(s, RangeSubsetState)
assert s.att == 'ydata'
def test_range_roi_categorical(self):
d = Data(x=['a', 'b', 'c'], y=[1, 2, 3])
comp = d.get_component(d.id['x'])
roi = CategoricalROI(['b', 'c'])
s = roi_to_subset_state(roi, x_att=d.id['x'], x_categories=comp.categories)
assert isinstance(s, CategoricalROISubsetState)
np.testing.assert_array_equal((s.roi.contains(['a', 'b', 'c'], None)),
[False, True, True])
roi = RangeROI('x', min=1, max=3)
s = roi_to_subset_state(roi, x_att='x', x_categories=comp.categories)
assert isinstance(s, CategoricalROISubsetState)
np.testing.assert_array_equal((s.roi.contains(['a', 'b', 'c'], None)),
[False, True, True])
def test_polygon_roi(self):
d = Data(x=[1, 1.3, 3, 10], y=[1, 1.5, 3, 10])
roi = PolygonalROI([0, 0, 2, 2], [0, 2, 2, 0])
s = roi_to_subset_state(roi, x_att=d.id['x'], y_att=d.id['y'])
assert isinstance(s, RoiSubsetState)
np.testing.assert_array_equal(s.to_mask(d), [True, True, False, False])
def test_polygon_categorical_rectangular(self):
d = Data(x=[1, 1.3, 3, 10], y=['a', 'b', 'c', 'd'])
y_comp = d.get_component(d.id['y'])
roi = PolygonalROI([0, 0, 2, 2], [0, 2, 2, 0])
s = roi_to_subset_state(roi, x_att='x', y_att='y', y_categories=y_comp.categories)
assert isinstance(s, CategoricalMultiRangeSubsetState)
np.testing.assert_array_equal(s.to_mask(d), [True, True, False, False])
def test_polygon_categorical_arbitrary(self):
d = Data(x=[1, 1.3, 3, 10], y=['a', 'b', 'c', 'd'])
y_comp = d.get_component(d.id['y'])
roi = PolygonalROI([0, 4, 4, 1, 0], [-0.5, 3.5, 0, -1, -0.5])
s = roi_to_subset_state(roi, x_att='x', y_att='y', y_categories=y_comp.categories)
assert isinstance(s, CategoricalMultiRangeSubsetState)
np.testing.assert_array_equal(s.to_mask(d), [True, False, True, False])
def test_rectangular_categorical(self):
d = Data(x=[1, 1.3, 3, 10], y=['a', 'b', 'c', 'd'])
x_comp = d.get_component(d.id['x'])
y_comp = d.get_component(d.id['y'])
roi = RectangularROI(xmin=-0.1, xmax=2.1, ymin=-0.1, ymax=2.1)
s = roi_to_subset_state(roi, x_att='x', y_att='y', y_categories=y_comp.categories)
assert isinstance(s, AndState)
np.testing.assert_array_equal(s.to_mask(d), [True, True, False, False])
s = roi_to_subset_state(roi, x_att='x', y_att='y', y_categories=y_comp.categories)
assert isinstance(s, AndState)
np.testing.assert_array_equal(s.to_mask(d), [True, True, False, False])
def test_polygon_both_categorical_arbitrary(self):
d = Data(x=['a', 'b', 'c', 'd', 'b', 'c'], y=['p', 'q', 'r', 's', 's', 'q'])
x_comp = d.get_component(d.id['x'])
y_comp = d.get_component(d.id['y'])
roi = PolygonalROI([0.5, 1.5, 2.5, 1, 0.5], [0.5, 0.5, 2.5, 3.5, 0.5])
s = roi_to_subset_state(roi,
x_att='x', x_categories=x_comp.categories,
y_att='y', y_categories=y_comp.categories)
assert isinstance(s, CategoricalROISubsetState2D)
np.testing.assert_array_equal(s.to_mask(d), [False, True, True, False, True, False])
def test_polygon_both_categorical_empty(self):
d = Data(x=['a', 'b', 'c', 'd', 'b', 'c'], y=['p', 'q', 'r', 's', 's', 'q'])
x_comp = d.get_component(d.id['x'])
y_comp = d.get_component(d.id['y'])
roi = PolygonalROI([0.5, 0.6, 0.6, 0.5], [0.5, 0.5, 0.6, 0.5])
s = roi_to_subset_state(roi,
x_att='x', x_categories=x_comp.categories,
y_att='y', y_categories=y_comp.categories)
assert isinstance(s, CategoricalROISubsetState2D)
np.testing.assert_array_equal(s.to_mask(d), [False, False, False, False, False, False])
def test_component_id_item_access():
data = Data()
c1 = Component(np.array([1, 2, 3]))
data.add_component(c1, 'values')
c2 = Component(np.array([4., 5., 6.]))
data.add_component(c2, 'Flux')
assert data.id['values'] == data.find_component_id('values')
assert data.id['Flux'] == data.find_component_id('Flux')
def test_component_id_item_access_missing():
"""id attribute should raise KeyError if requesting a bad ComponentID"""
data = Data()
with pytest.raises(KeyError):
data.id['not found']
class TestPixelLabel(object):
def test(self):
assert pixel_label(0, 2) == "0 [y]"
assert pixel_label(1, 2) == "1 [x]"
assert pixel_label(0, 3) == "0 [z]"
assert pixel_label(1, 3) == "1 [y]"
assert pixel_label(2, 3) == "2 [x]"
assert pixel_label(1, 0) == "1"
assert pixel_label(1, 4) == "1"
@pytest.mark.parametrize(('kwargs'),
[{'x': [1, 2, 3]},
{'x': np.array([1, 2, 3])},
{'x': [[1, 2, 3], [2, 3, 4]]},
{'x': [1, 2], 'y': [2, 3]}])
def test_init_with_inputs(kwargs):
"""Passing array-like objects as keywords to Data
auto-populates Components with label names = keywords"""
d = Data(**kwargs)
for label, data in kwargs.items():
np.testing.assert_array_equal(d[d.id[label]], data)
def test_init_with_invalid_kwargs():
with pytest.raises(ValueError) as exc:
d = Data(x=[1, 2], y=[1, 2, 3])
assert exc.value.args[0].startswith('The dimensions of component')
def test_getitem_with_component_link():
d = Data(x=[1, 2, 3, 4])
y = d.id['x'] * 5
np.testing.assert_array_equal(d[y], [5, 10, 15, 20])
def test_getitem_with_component_link_and_slice():
d = Data(x=[1, 2, 3, 4])
y = d.id['x'] * 5
np.testing.assert_array_equal(d[y, ::2], [5, 15])
def test_add_link_with_binary_link():
d = Data(x=[1, 2, 3, 4], y=[4, 5, 6, 7])
z = d.id['x'] + d.id['y']
d.add_component_link(z, 'z')
np.testing.assert_array_equal(d[d.id['z']], [5, 7, 9, 11])
def test_foreign_pixel_components_not_in_visible():
"""Pixel components from other data should not be visible"""
# currently, this is trivially satisfied since all coordinates are hidden
d1 = Data(x=[1], y=[2], coords=IdentityCoordinates(n_dim=1))
d2 = Data(w=[3], v=[4], coords=IdentityCoordinates(n_dim=1))
dc = DataCollection([d1, d2])
dc.add_link(LinkSame(d1.id['x'], d2.id['w']))
dc.add_link(LinkSame(d1.world_component_ids[0],
d2.world_component_ids[0]))
assert d2.pixel_component_ids[0] not in d1.main_components
np.testing.assert_array_equal(d1[d2.pixel_component_ids[0]], [0])
def test_add_binary_component():
d = Data(x=[1, 2, 3], y=[2, 3, 4])
z = d.id['x'] + d.id['y']
d.add_component(z, label='z')
np.testing.assert_array_equal(d['z'], [3, 5, 7])
EXPECTED_STR = """
Data Set: mydata
Number of dimensions: 1
Shape: 3
Main components:
- x
- y
Coordinate components:
- Pixel Axis 0 [x]
""".strip()
def test_data_str():
# Regression test for Data.__str__
d = Data(x=[1, 2, 3], y=[2, 3, 4], label='mydata')
assert str(d) == EXPECTED_STR
EXPECTED_STR_WITH_DERIVED = """
Data Set: mydata
Number of dimensions: 1
Shape: 3
Main components:
- x
- y
Derived components:
- z
Coordinate components:
- Pixel Axis 0 [x]
""".strip()
def test_data_str_with_derived():
d = Data(x=[1, 2, 3], y=[2, 3, 4], label='mydata')
d['z'] = d.id['x'] + 1
assert str(d) == EXPECTED_STR_WITH_DERIVED
def test_update_values_from_data():
d1 = Data(a=[1, 2, 3], b=[4, 5, 6], label='banana')
d2 = Data(b=[1, 2, 3, 4], c=[5, 6, 7, 8], label='apple')
d1a = d1.id['a']
d1b = d1.id['b']
d2b = d2.id['b']
d2c = d2.id['c']
d1.update_values_from_data(d2)
assert d1a not in d1.components
assert d1b in d1.components
assert d2b not in d1.components
assert d2c not in d1.components
assert [cid.label for cid in d1.main_components] == ['b', 'c']
assert d1.shape == (4,)
def test_update_values_from_data_invalid():
d1 = Data(a=[1, 2, 3], label='banana')
d1.add_component([3, 4, 5], 'a')
d2 = Data(b=[1, 2, 3, 4], c=[5, 6, 7, 8], label='apple')
with pytest.raises(ValueError) as exc:
d1.update_values_from_data(d2)
assert exc.value.args[0] == "Non-unique component labels in original data"
d1 = Data(a=[1, 2, 3], b=[4, 5, 6], label='banana')
d2 = Data(b=[1, 2, 3, 4], label='apple')
d2.add_component([5, 6, 7, 8], 'b')
with pytest.raises(ValueError) as exc:
d1.update_values_from_data(d2)
assert exc.value.args[0] == "Non-unique component labels in new data"
def test_update_values_from_data_order():
# Make sure that the order of components is preserved when calling
# Data.update_values_from_data. The final order should be first
# components that existed before, followed by new components
d1 = Data()
d1['c'] = [1, 2, 3]
d1['b'] = [2, 3, 4]
d1['j'] = [0, 1, 2]
d1['a'] = [4, 4, 4]
d1['f'] = [4, 5, 6]
d2 = Data()
d2['h'] = [4, 4, 4]
d2['j'] = [0, 1, 2]
d2['a'] = [4, 4, 4]
d2.update_values_from_data(d1)
assert [cid.label for cid in d2.main_components] == ['j', 'a', 'c', 'b', 'f']
def test_find_component_id_with_cid():
# Regression test for a bug that caused Data.find_component_id to return
# True erroneously when passing a component ID.
d1 = Data()
d1['a'] = ['a', 'b', 'c']
d1['b'] = [1, 2, 3]
assert d1.find_component_id(d1.id['a']) is d1.id['a']
assert d1.find_component_id(d1.id['b']) is d1.id['b']
def test_parent_preserved_session():
# Regression test for a bug that caused ComponentID parents to not be
# preserved when saving and restoring a session.
from ..link_helpers import LinkSame
from ..data_collection import DataCollection
d1 = Data(x=[1], y=[2], label='test1')
d2 = Data(w=[3], v=[4], label='test2')
dc = DataCollection([d1, d2])
dc.add_link(LinkSame(d1.id['x'], d2.id['w']))
assert d1.id['x'].parent is d1
assert d1.id['y'].parent is d1
assert d2.id['w'].parent is d2
assert d2.id['v'].parent is d2
dc2 = clone(dc)
assert dc2[0].id['x'].parent.label == 'test1'
assert dc2[0].id['y'].parent.label == 'test1'
assert dc2[1].id['w'].parent.label == 'test2'
assert dc2[1].id['v'].parent.label == 'test2'
def test_preserve_datetime():
# Make sure that we recognize and preserve the Numpy datetime64 format
dates = np.array([1, 2, 3], dtype='M8[D]')
data = Data(dates=dates)
assert isinstance(data.get_component('dates'), DateTimeComponent)
def test_clone_meta():
# Regression test for a bug that caused metadata to not be preserved
# when saving/loading sessions.
class CustomObject(object):
pass
data1 = Data(x=[1, 2, 3])
data1.meta['a'] = 1
data1.meta['b'] = 'test'
data1.meta['c'] = CustomObject()
data2 = clone(data1)
assert data2.meta['a'] == 1
assert data2.meta['b'] == 'test'
assert 'c' not in data2.meta
def test_update_coords():
# Make sure that when overriding coords, the world coordinate components
# are updated.
data1 = Data(x=[1, 2, 3], coords=IdentityCoordinates(n_dim=1))
assert len(data1.components) == 3
assert_equal(data1[data1.world_component_ids[0]], [0, 1, 2])
data2 = Data(x=[1, 2, 3], coords=IdentityCoordinates(n_dim=1))
assert len(data1.links) == 2
assert len(data2.links) == 2
data_collection = DataCollection([data1, data2])
assert len(data_collection.links) == 4
data_collection.add_link(LinkSame(data1.world_component_ids[0], data2.world_component_ids[0]))
assert len(data_collection.links) == 5
class CustomCoordinates(Coordinates):
def __init__(self):
super().__init__(pixel_n_dim=1, world_n_dim=1)
@property
def world_axis_names(self):
return ['Custom {0}'.format(axis) for axis in range(3)]
def world_to_pixel_values(self, *world):
return tuple([0.4 * w for w in world])
def pixel_to_world_values(self, *pixel):
return tuple([2.5 * p for p in pixel])
data1.coords = CustomCoordinates()
assert len(data1.components) == 3
assert_equal(data1[data1.world_component_ids[0]], [0, 2.5, 5])
assert sorted(cid.label for cid in data1.world_component_ids) == ['Custom 0']
# The link between the two world coordinates should be removed
assert len(data_collection.links) == 4
def test_compute_statistic_subset():
data = Data(x=list(range(10)))
result = data.compute_statistic('mean', data.id['x'], subset_state=data.id['x'] > 5)
assert_allclose(result, 7.5)
subset_state = SliceSubsetState(data, [slice(5)])
result = data.compute_statistic('mean', data.id['x'], subset_state=subset_state)
assert_allclose(result, 2.0)
@pytest.mark.parametrize('shape', (100, (30, 10), (500, 1, 30)))
def test_compute_statistic_chunks(shape):
# Make sure that when using chunks, the result is the same as without.
data = Data(x=np.random.random(shape))
axis = tuple(range(data.ndim - 1))
assert_allclose(data.compute_statistic('mean', data.id['x'], axis=axis),
data.compute_statistic('mean', data.id['x'], axis=axis, n_chunk_max=10))
def test_compute_statistic_random_subset():
data = Data(x=list(range(10)))
with NumpyRNGContext(12345):
result = data.compute_statistic('mean', data.id['x'], random_subset=5)
assert_allclose(result, 4.2)
result = data.compute_statistic('mean', data.id['x'], random_subset=5,
subset_state=MaskSubsetState([0, 1, 0, 1, 1, 1, 0, 1, 0, 1],
data.pixel_component_ids))
assert_allclose(result, 5)
def test_compute_statistic_empty_subset():
data = Data(x=np.empty((30, 20, 40)))
# A default subset state should be empty
subset_state = SubsetState()
result = data.compute_statistic('mean', data.id['x'], subset_state=subset_state)
assert_equal(result, np.nan)
result = data.compute_statistic('maximum', data.id['x'], subset_state=subset_state, axis=1)
assert_equal(result, broadcast_to(np.nan, (30, 40)))
result = data.compute_statistic('median', data.id['x'], subset_state=subset_state, axis=(1, 2))
assert_equal(result, broadcast_to(np.nan, (30)))
result = data.compute_statistic('sum', data.id['x'], subset_state=subset_state, axis=(0, 1, 2))
assert_equal(result, np.nan)
def test_compute_statistic_efficient():
# Unit test to test the logic for dealing with accessing only a minimal
# region from the data based on the smallest array that covers a given
# subset state.
array = np.ones(10 * 20 * 30 * 40).reshape((10, 20, 40, 30))
array[3:5, 6:14, :, 10:21:2] += 1
class CustomData(Data):
def get_data(self, cid, view=None):
if cid.label == 'x':
self.elements_accessed = np.ones(self.shape)[view].sum()
else:
self.elements_accessed = 0
return super().get_data(cid, view=view)
data = CustomData(x=array, y=array)
subset_state = data.id['y'] > 1.5
# First test without view
result = data.compute_statistic('sum', data.id['x'], subset_state=subset_state)
|
assert_allclose(result, 7680)
|
numpy.testing.assert_allclose
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
from cleverhans.devtools.checks import CleverHansTest
from cleverhans.attacks import Attack
from cleverhans.attacks import FastGradientMethod
from cleverhans.attacks import BasicIterativeMethod
from cleverhans.attacks import MomentumIterativeMethod
from cleverhans.attacks import VirtualAdversarialMethod
from cleverhans.attacks import SaliencyMapMethod
from cleverhans.attacks import CarliniWagnerL2
from cleverhans.attacks import ElasticNetMethod
from cleverhans.attacks import DeepFool
from cleverhans.attacks import MadryEtAl
from cleverhans.attacks import FastFeatureAdversaries
class TestAttackClassInitArguments(CleverHansTest):
def test_model(self):
import tensorflow as tf
sess = tf.Session()
# Exception is thrown when model does not have __call__ attribute
with self.assertRaises(Exception) as context:
model = tf.placeholder(tf.float32, shape=(None, 10))
Attack(model, back='tf', sess=sess)
self.assertTrue(context.exception)
def test_back(self):
# Define empty model
def model():
return True
# Exception is thrown when back is not tf or th
with self.assertRaises(Exception) as context:
Attack(model, back='test', sess=None)
self.assertTrue(context.exception)
def test_sess(self):
# Define empty model
def model():
return True
# Test that it is permitted to provide no session
Attack(model, back='tf', sess=None)
def test_sess_generate_np(self):
def model(x):
return True
class DummyAttack(Attack):
def generate(self, x, **kwargs):
return x
attack = DummyAttack(model, back='tf', sess=None)
with self.assertRaises(Exception) as context:
attack.generate_np(0.)
self.assertTrue(context.exception)
class TestParseParams(CleverHansTest):
def test_parse(self):
def model():
return True
import tensorflow as tf
sess = tf.Session()
test_attack = Attack(model, back='tf', sess=sess)
self.assertTrue(test_attack.parse_params({}))
class TestVirtualAdversarialMethod(CleverHansTest):
def setUp(self):
super(TestVirtualAdversarialMethod, self).setUp()
import tensorflow as tf
import tensorflow.contrib.slim as slim
def dummy_model(x):
net = slim.fully_connected(x, 60)
return slim.fully_connected(net, 10, activation_fn=None)
self.sess = tf.Session()
self.sess.as_default()
self.model = tf.make_template('dummy_model', dummy_model)
self.attack = VirtualAdversarialMethod(self.model, sess=self.sess)
# initialize model
with tf.name_scope('dummy_model'):
self.model(tf.placeholder(tf.float32, shape=(None, 1000)))
self.sess.run(tf.global_variables_initializer())
def test_parse_params(self):
self.attack.parse_params()
# test default values
self.assertEqual(self.attack.eps, 2.0)
self.assertEqual(self.attack.num_iterations, 1)
self.assertEqual(self.attack.xi, 1e-6)
self.assertEqual(self.attack.clip_min, None)
self.assertEqual(self.attack.clip_max, None)
def test_generate_np(self):
x_val = np.random.rand(100, 1000)
perturbation = self.attack.generate_np(x_val) - x_val
perturbation_norm = np.sqrt(np.sum(perturbation**2, axis=1))
# test perturbation norm
self.assertClose(perturbation_norm, self.attack.eps)
class TestFastGradientMethod(CleverHansTest):
def setUp(self):
super(TestFastGradientMethod, self).setUp()
import tensorflow as tf
# The world's simplest neural network
def my_model(x):
W1 = tf.constant([[1.5, .3], [-2, 0.3]], dtype=tf.float32)
h1 = tf.nn.sigmoid(tf.matmul(x, W1))
W2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]], dtype=tf.float32)
res = tf.nn.softmax(tf.matmul(h1, W2))
return res
self.sess = tf.Session()
self.model = my_model
self.attack = FastGradientMethod(self.model, sess=self.sess)
def help_generate_np_gives_adversarial_example(self, ord):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
x_adv = self.attack.generate_np(x_val, eps=.5, ord=ord,
clip_min=-5, clip_max=5)
if ord == np.inf:
delta = np.max(np.abs(x_adv - x_val), axis=1)
elif ord == 1:
delta = np.sum(np.abs(x_adv - x_val), axis=1)
elif ord == 2:
delta = np.sum(np.square(x_adv - x_val), axis=1)**.5
self.assertClose(delta, 0.5)
orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(orig_labs == new_labs) < 0.5)
def test_generate_np_gives_adversarial_example_linfinity(self):
self.help_generate_np_gives_adversarial_example(np.infty)
def test_generate_np_gives_adversarial_example_l1(self):
self.help_generate_np_gives_adversarial_example(1)
def test_generate_np_gives_adversarial_example_l2(self):
self.help_generate_np_gives_adversarial_example(2)
def test_targeted_generate_np_gives_adversarial_example(self):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
random_labs = np.random.random_integers(0, 1, 100)
random_labs_one_hot = np.zeros((100, 2))
random_labs_one_hot[np.arange(100), random_labs] = 1
x_adv = self.attack.generate_np(x_val, eps=.5, ord=np.inf,
clip_min=-5, clip_max=5,
y_target=random_labs_one_hot)
delta = np.max(np.abs(x_adv - x_val), axis=1)
self.assertClose(delta, 0.5)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(random_labs == new_labs) > 0.7)
def test_generate_np_can_be_called_with_different_eps(self):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
for eps in [0.1, 0.2, 0.3, 0.4]:
x_adv = self.attack.generate_np(x_val, eps=eps, ord=np.inf,
clip_min=-5.0, clip_max=5.0)
delta = np.max(np.abs(x_adv - x_val), axis=1)
self.assertClose(delta, eps)
def test_generate_np_clip_works_as_expected(self):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
x_adv = self.attack.generate_np(x_val, eps=0.5, ord=np.inf,
clip_min=-0.2, clip_max=0.1)
self.assertClose(np.min(x_adv), -0.2)
self.assertClose(np.max(x_adv), 0.1)
def test_generate_np_caches_graph_computation_for_eps_clip_or_xi(self):
import tensorflow as tf
x_val = np.random.rand(1, 2)
x_val = np.array(x_val, dtype=np.float32)
self.attack.generate_np(x_val, eps=.3, num_iterations=10,
clip_max=-5.0, clip_min=-5.0,
xi=1e-6)
old_grads = tf.gradients
def fn(*x, **y):
raise RuntimeError()
tf.gradients = fn
self.attack.generate_np(x_val, eps=.2, num_iterations=10,
clip_max=-4.0, clip_min=-4.0,
xi=1e-5)
tf.gradients = old_grads
class TestBasicIterativeMethod(TestFastGradientMethod):
def setUp(self):
super(TestBasicIterativeMethod, self).setUp()
import tensorflow as tf
# The world's simplest neural network
def my_model(x):
W1 = tf.constant([[1.5, .3], [-2, 0.3]], dtype=tf.float32)
h1 = tf.nn.sigmoid(tf.matmul(x, W1))
W2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]], dtype=tf.float32)
res = tf.nn.softmax(tf.matmul(h1, W2))
return res
self.sess = tf.Session()
self.model = my_model
self.attack = BasicIterativeMethod(self.model, sess=self.sess)
def test_attack_strength(self):
"""
If clipping is not done at each iteration (not passing clip_min and
clip_max to fgm), this attack fails by
np.mean(orig_labels == new_labels) == .39.
"""
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
clip_min=0.5, clip_max=0.7,
nb_iter=5)
orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
def test_generate_np_does_not_cache_graph_computation_for_nb_iter(self):
import tensorflow as tf
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
clip_min=-5.0, clip_max=5.0,
nb_iter=10)
orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
ok = [False]
old_grads = tf.gradients
def fn(*x, **y):
ok[0] = True
return old_grads(*x, **y)
tf.gradients = fn
x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
clip_min=-5.0, clip_max=5.0,
nb_iter=11)
orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
tf.gradients = old_grads
self.assertTrue(ok[0])
class TestMomentumIterativeMethod(TestBasicIterativeMethod):
def setUp(self):
super(TestMomentumIterativeMethod, self).setUp()
import tensorflow as tf
# The world's simplest neural network
def my_model(x):
W1 = tf.constant([[1.5, .3], [-2, 0.3]], dtype=tf.float32)
h1 = tf.nn.sigmoid(tf.matmul(x, W1))
W2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]], dtype=tf.float32)
res = tf.nn.softmax(tf.matmul(h1, W2))
return res
self.sess = tf.Session()
self.model = my_model
self.attack = MomentumIterativeMethod(self.model, sess=self.sess)
def test_generate_np_can_be_called_with_different_decay_factor(self):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
for decay_factor in [0.0, 0.5, 1.0]:
x_adv = self.attack.generate_np(x_val, eps=0.5, ord=np.inf,
decay_factor=decay_factor,
clip_min=-5.0, clip_max=5.0)
delta = np.max(np.abs(x_adv - x_val), axis=1)
self.assertClose(delta, 0.5)
class TestCarliniWagnerL2(CleverHansTest):
def setUp(self):
super(TestCarliniWagnerL2, self).setUp()
import tensorflow as tf
# The world's simplest neural network
def my_model(x):
W1 = tf.constant([[1.5, .3], [-2, 0.3]], dtype=tf.float32)
h1 = tf.nn.sigmoid(tf.matmul(x, W1))
W2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]], dtype=tf.float32)
res = tf.matmul(h1, W2)
return res
self.sess = tf.Session()
self.model = my_model
self.attack = CarliniWagnerL2(self.model, sess=self.sess)
def test_generate_np_untargeted_gives_adversarial_example(self):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
x_adv = self.attack.generate_np(x_val, max_iterations=100,
binary_search_steps=3,
initial_const=1,
clip_min=-5, clip_max=5,
batch_size=10)
orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
def test_generate_np_targeted_gives_adversarial_example(self):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
feed_labs = np.zeros((100, 2))
feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
x_adv = self.attack.generate_np(x_val, max_iterations=100,
binary_search_steps=3,
initial_const=1,
clip_min=-5, clip_max=5,
batch_size=100, y_target=feed_labs)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs)
> 0.9)
def test_generate_gives_adversarial_example(self):
import tensorflow as tf
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
feed_labs = np.zeros((100, 2))
feed_labs[np.arange(100), orig_labs] = 1
x = tf.placeholder(tf.float32, x_val.shape)
y = tf.placeholder(tf.float32, feed_labs.shape)
x_adv_p = self.attack.generate(x, max_iterations=100,
binary_search_steps=3,
initial_const=1,
clip_min=-5, clip_max=5,
batch_size=100, y=y)
x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
def test_generate_np_gives_clipped_adversarial_examples(self):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
x_adv = self.attack.generate_np(x_val, max_iterations=10,
binary_search_steps=1,
learning_rate=1e-3,
initial_const=1,
clip_min=-0.2, clip_max=0.3,
batch_size=100)
self.assertTrue(-0.201 < np.min(x_adv))
self.assertTrue(np.max(x_adv) < .301)
def test_generate_np_high_confidence_targeted_examples(self):
import tensorflow as tf
def trivial_model(x):
W1 = tf.constant([[1, -1]], dtype=tf.float32)
res = tf.matmul(x, W1)
return res
for CONFIDENCE in [0, 2.3]:
x_val = np.random.rand(10, 1) - .5
x_val = np.array(x_val, dtype=np.float32)
feed_labs = np.zeros((10, 2))
feed_labs[np.arange(10), np.random.randint(0, 2, 10)] = 1
attack = CarliniWagnerL2(trivial_model, sess=self.sess)
x_adv = attack.generate_np(x_val,
max_iterations=100,
binary_search_steps=2,
learning_rate=1e-2,
initial_const=1,
clip_min=-10, clip_max=10,
confidence=CONFIDENCE,
y_target=feed_labs,
batch_size=10)
new_labs = self.sess.run(trivial_model(x_adv))
good_labs = new_labs[np.arange(10), np.argmax(feed_labs, axis=1)]
bad_labs = new_labs[np.arange(
10), 1 - np.argmax(feed_labs, axis=1)]
self.assertTrue(np.isclose(
0, np.min(good_labs - (bad_labs + CONFIDENCE)), atol=1e-1))
self.assertTrue(np.mean(np.argmax(new_labs, axis=1) ==
|
np.argmax(feed_labs, axis=1)
|
numpy.argmax
|
# -*- coding: utf-8 -*-
from datetime import datetime
import numpy as np
import pytest
from tidegravity import solve_longman_tide, solve_longman_tide_scalar, solve_tide_df, solve_point_corr
from tidegravity import import_trajectory, calculate_julian_century
def test_array_calculation():
lat = np.array([47.1234, 49.8901])
lon = np.array([104.9903, 105.9901])
alt =
|
np.array([1609.3, 1700.1])
|
numpy.array
|
import numpy as np #for arrays and matrices
import matplotlib.pyplot as plt #for ploting
import scipy #for math calculations
from mpl_toolkits.mplot3d import Axes3D #for 3D ploting
import math #for math constants
from matplotlib import collections as matcoll
from scipy import signal #for signal analysis
from scipy import fftpack #for fourier spectrum
from scipy.fftpack import fft
import binascii
##PARAMETERS
Fm = 3000 #kHz
Tm = 1 / Fm #sec
A = 1 #V
AM = 3
N_periods = 4 #periods displayed
Samples_per_period = 2000 #number of samples per period
N_samples = N_periods * Samples_per_period + 1 #total number of samples (in linspace)
Timestep = 1.0 / (float(Fm * Samples_per_period)) #sample spacing
#### QUESTION A ####
A_bit = Fm/1000 #(V) Amplitude of bit stream
T_b = 0.5 #(sec) bit duration
N_rand_bits = 46 #number of random bits generated
E_b = pow(A_bit, 2)*T_b
rand_bits = np.random.randint(2, size=(N_rand_bits)) #generate random bits [0,1]
def SNR_dB_lin(snr_ratio):
return 10**(snr_ratio / 10)
No_5 = E_b / SNR_dB_lin(5) #Conversion from dB to linear scale
No_15 = E_b / SNR_dB_lin(15) #Conversion from dB to linear scale
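# Quick sanity check on the dB-to-linear helper (added example, not part of
# the original exercise): 10 dB is a linear ratio of 10, and 3 dB is roughly 2.
assert np.isclose(SNR_dB_lin(10), 10.0)
assert np.isclose(SNR_dB_lin(3), 2.0, atol=0.01)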
#QPSK encoding
'''
00 -> s1
01 -> s2
11 -> s3
10 -> s4
'''
qpsk_num_symbols = 100
#QPSK CONSTELLATION POINTS (00, 01, 11, 10)
qpsk_v_size = math.sqrt(E_b) #vector size of qpsk constellation
qpsk_const_points = np.random.randint(0, 4, qpsk_num_symbols) # 0 to 3
# qpsk_const_points = np.arange(0,4)
qpsk_const_degrees = qpsk_const_points*360/4.0 + 45 # 45, 135, 225, 315 degrees
qpsk_const_radians = qpsk_const_degrees*np.pi/180.0 # sin() and cos() takes in radians
qpsk_const_symbols = qpsk_v_size*np.cos(qpsk_const_radians) + qpsk_v_size*1j*np.sin(qpsk_const_radians) # this produces our QPSK complex symbols
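# Illustrative decode step (added sketch, mirroring the encoding above): the
# constellation index can be recovered from each complex symbol's phase by
# undoing the 45-degree offset and 90-degree spacing used when encoding.
recovered_points = (np.round((np.degrees(np.angle(qpsk_const_symbols)) - 45) / 90) % 4).astype(int)
assert np.array_equal(recovered_points, qpsk_const_points)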
plt.plot(
|
np.real(qpsk_const_symbols)
|
numpy.real
|
# First look at the color filtering
# It is based on that code for pink ball detection color filtering.py
# That code was for Green Ball, this is for pink ball
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
while True:
_, frame = cap.read()
#flip video
frame=cv2.flip(frame, 1)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Hue Saturation Value
# http://colorizer.org/ for knowing color hsv values
lower_pink =
|
np.array([165, 160, 76])
|
numpy.array
|
#! /usr/bin/env python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implement datasets classes for graph and family tree tasks."""
import numpy as np
import itertools
from torch.utils.data.dataset import Dataset
#from torchvision import datasets
import torch
import jacinle.random as random
import pickle
from .family import randomly_generate_family
from ...envs.graph import get_random_graph_generator
import math
from .nqueens import NQueenSolution
from IPython.core.debugger import Pdb
import copy
from jacinle.logging import get_logger, set_output_file
from torch.distributions.categorical import Categorical
TRAIN = 0
DEV = 1
TEST = 2
logger = get_logger(__file__)
__all__ = [
'GraphOutDegreeDataset', 'GraphConnectivityDataset', 'GraphAdjacentDataset',
'FamilyTreeDataset','NQueensDataset', 'FutoshikiDataset','TowerDataset','SudokuDataset'
]
class GraphDatasetBase(Dataset):
"""Base dataset class for graphs.
Args:
epoch_size: The number of batches for each epoch.
nmin: The minimal number of nodes in the graph.
pmin: The lower bound of the parameter p of the graph generator.
nmax: The maximal number of nodes in the graph;
defaults to the same value as nmin.
pmax: The upper bound of the parameter p of the graph generator;
defaults to the same value as pmin.
directed: Generate a directed graph if directed=True.
gen_method: Controls the graph generation method.
If gen_method='dnc', generate graphs in a similar way to the DNC paper.
Otherwise use the Erdos-Renyi algorithm (each edge exists with probability p).
"""
def __init__(self,
epoch_size,
nmin,
pmin,
nmax=None,
pmax=None,
directed=False,
gen_method='dnc'):
self._epoch_size = epoch_size
self._nmin = nmin
self._nmax = nmin if nmax is None else nmax
assert self._nmin <= self._nmax
self._pmin = pmin
self._pmax = pmin if pmax is None else pmax
assert self._pmin <= self._pmax
self._directed = directed
self._gen_method = gen_method
def _gen_graph(self, item):
n = self._nmin + item % (self._nmax - self._nmin + 1)
p = self._pmin + random.rand() * (self._pmax - self._pmin)
gen = get_random_graph_generator(self._gen_method)
return gen(n, p, directed=self._directed)
def __len__(self):
return self._epoch_size
class GraphOutDegreeDataset(GraphDatasetBase):
"""The dataset for out-degree task in graphs."""
def __init__(self,
degree,
epoch_size,
nmin,
pmin,
nmax=None,
pmax=None,
directed=False,
gen_method='dnc'):
super().__init__(epoch_size, nmin, pmin, nmax, pmax, directed, gen_method)
self._degree = degree
def __getitem__(self, item):
graph = self._gen_graph(item)
# The goal is to predict whether out-degree(x) == self._degree for all x.
return dict(
n=graph.nr_nodes,
relations=np.expand_dims(graph.get_edges(), axis=-1),
target=(graph.get_out_degree() == self._degree).astype('float'),
)
class GraphConnectivityDataset(GraphDatasetBase):
"""The dataset for connectivity task in graphs."""
def __init__(self,
dist_limit,
epoch_size,
nmin,
pmin,
nmax=None,
pmax=None,
directed=False,
gen_method='dnc'):
super().__init__(epoch_size, nmin, pmin, nmax, pmax, directed, gen_method)
self._dist_limit = dist_limit
def __getitem__(self, item):
graph = self._gen_graph(item)
# The goal is to predict whether (x, y) are connected within a limited number of steps,
# I.e. dist(x, y) <= self._dist_limit for all x, y.
return dict(
n=graph.nr_nodes,
relations=np.expand_dims(graph.get_edges(), axis=-1),
target=graph.get_connectivity(self._dist_limit, exclude_self=True),
)
class GraphAdjacentDataset(GraphDatasetBase):
"""The dataset for adjacent task in graphs."""
def __init__(self,
nr_colors,
epoch_size,
nmin,
pmin,
nmax=None,
pmax=None,
directed=False,
gen_method='dnc',
is_train=True,
is_mnist_colors=False,
mnist_dir='../data'):
super().__init__(epoch_size, nmin, pmin, nmax, pmax, directed, gen_method)
self._nr_colors = nr_colors
self._is_mnist_colors = is_mnist_colors
# When taking MNIST digits as inputs, fetch MNIST dataset.
if self._is_mnist_colors:
assert nr_colors == 10
self.mnist = datasets.MNIST(
mnist_dir, train=is_train, download=True, transform=None)
def __getitem__(self, item):
graph = self._gen_graph(item)
n = graph.nr_nodes
if self._is_mnist_colors:
m = self.mnist.__len__()
digits = []
colors = []
for i in range(n):
x = random.randint(m)
digit, color = self.mnist.__getitem__(x)
digits.append(np.array(digit)[np.newaxis])
colors.append(color)
digits, colors = np.array(digits), np.array(colors)
else:
colors = random.randint(self._nr_colors, size=n)
states = np.zeros((n, self._nr_colors))
adjacent = np.zeros((n, self._nr_colors))
# The goal is to predict, for each node x, whether it has an adjacent
# node of the desired color.
for i in range(n):
states[i, colors[i]] = 1
adjacent[i, colors[i]] = 1
for j in range(n):
if graph.has_edge(i, j):
adjacent[i, colors[j]] = 1
if self._is_mnist_colors:
states = digits
return dict(
n=n,
relations=np.expand_dims(graph.get_edges(), axis=-1),
states=states,
colors=colors,
target=adjacent,
)
class FamilyTreeDataset(Dataset):
"""The dataset for family tree tasks."""
def __init__(self,
task,
epoch_size,
nmin,
nmax=None,
p_marriage=0.8,
balance_sample=False):
super().__init__()
self._task = task
self._epoch_size = epoch_size
self._nmin = nmin
self._nmax = nmin if nmax is None else nmax
assert self._nmin <= self._nmax
self._p_marriage = p_marriage
self._balance_sample = balance_sample
self._data = []
def _gen_family(self, item):
n = self._nmin + item % (self._nmax - self._nmin + 1)
return randomly_generate_family(n, self._p_marriage)
def __getitem__(self, item):
#Pdb().set_trace()
while len(self._data) == 0:
family = self._gen_family(item)
relations = family.relations[:, :, 2:]
if self._task == 'has-father':
target = family.has_father()
elif self._task == 'has-daughter':
target = family.has_daughter()
elif self._task == 'has-sister':
target = family.has_sister()
elif self._task == 'parents':
target = family.get_parents()
elif self._task == 'grandparents':
target = family.get_grandparents()
elif self._task == 'uncle':
target = family.get_uncle()
elif self._task == 'maternal-great-uncle':
target = family.get_maternal_great_uncle()
else:
assert False, '{} is not supported.'.format(self._task)
if not self._balance_sample:
return dict(n=family.nr_people, relations=relations, target=target)
# In balance_sample case, the data format is different. Not used.
def get_positions(x):
return list(np.vstack(np.where(x)).T)
def append_data(pos, target):
states = np.zeros((family.nr_people, 2))
states[pos[0], 0] = states[pos[1], 1] = 1
self._data.append(dict(n=family.nr_people,
relations=relations,
states=states,
target=target))
positive = get_positions(target == 1)
if len(positive) == 0:
continue
negative = get_positions(target == 0)
np.random.shuffle(negative)
negative = negative[:len(positive)]
for i in positive:
append_data(i, 1)
for i in negative:
append_data(i, 0)
return self._data.pop()
def __len__(self):
return self._epoch_size
class NQueensDataset(Dataset):
"""The dataset for nqueens tasks."""
def __init__(self,
epoch_size,
n=10,
num_missing = 1,
random_seed = 42,
min_loss = False,
arbit_solution = False,
train_dev_test = TRAIN,
data_file = None,
data_sampling='rs'):
super().__init__()
self._epoch_size = epoch_size
self._n = n
self.num_missing = num_missing
self.min_loss = min_loss
self.arbit_solution = arbit_solution
self.mode = train_dev_test
self.data_sampling = data_sampling
self.nqueen_solver = NQueenSolution()
self.relations = self.nqueen_solver.get_relations(n)
print("In constructor. Size: {}".format(n))
if data_file is None:
outfile = "data/nqueens_data_"+str(self._n)+"_"+str(self.num_missing)+".pkl"
else:
outfile = data_file
#
with open(outfile,"rb") as f:
self.dataset = pickle.load(f)
self.max_count = 0
self.unique_indices = []
self.ambiguous_indices = []
for i,data in enumerate(self.dataset):
self.max_count = max(self.max_count, data["count"])
if data["count"]==1:
self.unique_indices.append(i)
else:
self.ambiguous_indices.append(i)
np.random.seed(random_seed)
self.reset_sampler(data_sampling)
def reset_sampler(self,data_sampling):
self.data_sampling = data_sampling
if data_sampling == 'rsxy':
logger.info("Sampling uniformly from (x,y) tuples")
self.sampler = Categorical(probs = torch.tensor([x['count'] for x in self.dataset]).float())
else:
self.sampler = Categorical(probs = torch.tensor([1.0 for _ in self.dataset]).float())
def pad_set(self,target_set):
pad_counter = self.max_count - len(target_set)
return_set = list(target_set)
return_set.extend([target_set[-1] for _ in range(pad_counter)])
return np.array(return_set)
def sample_imbalance(self, imbalance_ratio):
if np.random.rand()<imbalance_ratio:
ind = np.random.choice(self.ambiguous_indices)
else:
ind = np.random.choice(self.unique_indices)
return ind
def __getitem__(self, item):
#ind = np.random.randint(0,len(self.dataset))
ind = self.sampler.sample().item()
if self.mode==TRAIN:
if self.data_sampling=="unique":
ind = self.sample_imbalance(0)
elif self.data_sampling=="ambiguous":
ind = self.sample_imbalance(1)
elif self.data_sampling=="one-one":
ind = self.sample_imbalance(0.5)
elif self.data_sampling=="two-one":
ind = self.sample_imbalance(0.33)
elif self.data_sampling=="three-one":
ind = self.sample_imbalance(0.25)
elif self.data_sampling=="four-one":
ind = self.sample_imbalance(0.20)
else:
ind = item%len(self.dataset)
data = self.dataset[ind]
if len(data["query"].shape)==1:
data["query"] = np.expand_dims(data["query"],1)
if self.mode==TRAIN and self.arbit_solution:
data["target"] = data["target_set"][0]
else:
data["target"] = data["target_set"][np.random.randint(len(data["target_set"]))]
#
data["target_set"] = self.pad_set(data["target_set"])
data['mask'] = np.array([1 for _ in range(data['count'])] + [0 for _ in range(data['target_set'].shape[0] - data['count'])])
#Pdb().set_trace()
data["relations"] = self.relations
data['ind'] = ind
if isinstance(data["qid"],tuple):
data["qid"] = np.array([data["qid"][0]]+list(data["qid"][1]))
return data
def __len__(self):
if self.mode==TRAIN:
return self._epoch_size
else:
return len(self.dataset)
class FutoshikiDataset(Dataset):
"""The dataset for futoshiki tasks."""
def __init__(self,
epoch_size,
n=10,
num_missing = 1,
num_constraints = 0,
data_size = -1,
random_seed = 42,
min_loss = False,
arbit_solution = False,
train_dev_test = TRAIN,
data_file = None,
data_sampling='rs',args=None):
super().__init__()
self.args = args
self._epoch_size = epoch_size
self._n = n
self.num_missing = num_missing
self.min_loss = min_loss
self.arbit_solution = arbit_solution
self.mode = train_dev_test
self.data_sampling = data_sampling
self.relations = self.get_relations()
print("In constructor. Size: {}".format(n))
if train_dev_test == TRAIN:
mode = 'train'
elif train_dev_test == DEV:
mode = 'val'
elif train_dev_test == TEST:
mode = 'test'
if data_file is None:
outfile = "data/futo_{}_{}_{}_{}.pkl".format(self._n, num_missing, num_constraints, mode)
else:
outfile = data_file
#
logger.info("data file : {}".format(outfile))
#Pdb().set_trace()
with open(outfile,"rb") as f:
self.dataset = pickle.load(f)
if data_size != -1:
self.dataset= self.dataset[:data_size]
#
self.max_count = 0
self.unique_indices = []
self.ambiguous_indices = []
for i,data in enumerate(self.dataset):
if 'count' in data:
this_count = data['count']
else:
this_count = data['target_set'].shape[0]
data['count'] = this_count
self.max_count = max(self.max_count, this_count)
if this_count == 1:
self.unique_indices.append(i)
else:
self.ambiguous_indices.append(i)
np.random.seed(random_seed)
self.reset_sampler(data_sampling)
def reset_sampler(self,data_sampling):
self.data_sampling = data_sampling
if data_sampling == 'rsxy':
logger.info("Sampling uniformly from (x,y) tuples")
self.sampler = Categorical(probs = torch.tensor([x['count'] for x in self.dataset]).float())
else:
self.sampler = Categorical(probs = torch.tensor([1.0 for _ in self.dataset]).float())
def get_relations(self):
n = self._n
n2 = self._n**2
n3 = self._n**3
relations = np.zeros((n3, n3,3))
for x in range(n3):
row = int(x/n2)
col = int((x%n2)/n)
num = int(x%n2)%n
for y in range(n):
# cell constraints
relations[x][row*n2+col*n+y][0]=1
# row constraints
relations[x][y*n2+col*n+num][1]=1
# column constraints
relations[x][row*n2+y*n+num][2]=1
return relations
def pad_set(self,target_set):
pad_counter = self.max_count - len(target_set)
if pad_counter < 0:
return target_set[:self.max_count]
return_set = list(target_set)
return_set.extend([target_set[-1] for _ in range(pad_counter)])
return np.array(return_set)
def sample_imbalance(self, imbalance_ratio):
if np.random.rand()<imbalance_ratio:
ind = np.random.choice(self.ambiguous_indices)
else:
ind =
|
np.random.choice(self.unique_indices)
|
numpy.random.choice
|
import logging
import numpy as np
import gym
import pytest
from ..base.errors import NumpyArrayCheckError
from .misc import set_tf_loglevel
from .array import (
idx, check_numpy_array, project_onto_actions_np, softmax, log_softmax,
box_to_reals_np, reals_to_box_np)
set_tf_loglevel(logging.ERROR)
def test_check_numpy_array_ndim_min():
arr = np.array([])
with pytest.raises(NumpyArrayCheckError):
check_numpy_array(arr, ndim_min=2)
def test_idx_type():
with pytest.raises(NumpyArrayCheckError):
idx(0)
with pytest.raises(NumpyArrayCheckError):
idx('foo')
with pytest.raises(NumpyArrayCheckError):
idx([])
with pytest.raises(NumpyArrayCheckError):
idx(None)
def test_idx_empty():
arr = np.array(0)
with pytest.raises(NumpyArrayCheckError):
idx(arr)
def test_idx_expected():
arr = np.resize(np.arange(12), (3, 4))
np.testing.assert_array_equal(idx(arr), [0, 1, 2])
assert idx(arr).dtype == 'int'
def test_project_onto_actions_np_expected():
Y = np.resize(np.arange(12), (3, 4))
A = np.array([2, 0, 3])
Y_proj = project_onto_actions_np(Y, A)
assert Y_proj.ndim == 1
assert Y_proj.shape == (3,)
np.testing.assert_array_equal(Y_proj, [Y[0, 2], Y[1, 0], Y[2, 3]])
def test_softmax_expected():
rnd = np.random.RandomState(7)
w = rnd.randn(3, 5)
x = softmax(w, axis=1)
y = softmax(w + 100., axis=1)
z = softmax(w * 100., axis=1)
# check shape
assert x.shape == w.shape
# check normalization
np.testing.assert_almost_equal(x.sum(axis=1), np.ones(3))
# check translation invariance
np.testing.assert_almost_equal(y.sum(axis=1), np.ones(3))
np.testing.assert_almost_equal(x, y)
# check robustness by clipping
assert not np.any(np.isnan(z))
np.testing.assert_almost_equal(z.sum(axis=1),
|
np.ones(3)
|
numpy.ones
|
import scipy.signal
import numpy as np
#===========================================================
# Routine by Luis-<NAME> (IPGP & IFSTTAR), Jan 2020.
#===========================================================
# Tapering with a Hanning window
def taper(x,p):
if p <= 0.0:
return x
else:
f0 = 0.5
f1 = 0.5
n = len(x)
nw = int(p*n)
if nw > 0:
ow = np.pi/nw
w = np.ones( n )
for i in range( nw ):
w[i] = f0 - f1 * np.cos(ow*i)
for i in range( n-nw,n ):
w[i] = 1.0 - w[i-n+nw]
return x * w
elif nw == 0:
return x
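# Minimal usage sketch (added example, assuming a flat test trace): taper 10%
# of each end of a constant signal; the first sample is fully attenuated while
# the centre of the trace is left untouched.
def _taper_example():
    trace = np.ones(100)
    tapered = taper(trace, 0.1)
    assert tapered[0] == 0.0    # f0 - f1*cos(0) = 0 at the very first sample
    assert tapered[50] == 1.0   # the untapered middle is unchanged
    assert tapered[-1] < 1.0    # the trailing edge is attenuated as well
    return tapered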
# Bitwise version
def next_power_of_2(n):
"""
Return next power of 2 greater than or equal to n
"""
return 2**(n-1).bit_length()
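# Example values for the helper above (added for illustration):
# next_power_of_2(1) == 1, next_power_of_2(5) == 8, next_power_of_2(8) == 8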
# PCC2 from Ventosa et al. (2019)
def pcc2(x1, x2, dt, lag0, lagu):
# Preprocessing
x1 = x1 -
|
np.mean(x1)
|
numpy.mean
|
import numpy as np
import numpy.linalg as linalg
import scipy as sp
import scipy.sparse as sparse
import scipy.special as special
import matplotlib.pyplot as plt
import itertools
import multiprocessing as mp
def bitvector_to_index(v):
'''
Given a list or array of binary values (v), convert to
an integer. The list is assumed to be big-endian.
This method is meant to convert back and forth between
matrix indices and spin configurations. In this sense,
zeros are interpreted as spins in the +z state while
ones are spins in the -z state.
Arguments:
v: One dimensional list or array of binary
(0 or 1) values.
Returns:
ind: Integer corresponding to big-endian
interpretation of the binary list v.
'''
ind = sum([vj*(2**(len(v)-j-1)) for j,vj in enumerate(v)])
return ind
def index_to_bitvector(ind, n):
'''
Given a non-negative interger (ind) and a number of
bits (n), return a big-endian binary list of length n
encoding ind. Require 0 <= ind < 2**n.
This method is meant to convert back and forth between
matrix indices and spin configurations. In this sense,
zeros are interpreted as spins in the +z state while
ones are spins in the -z state. n is the total number
of spins.
Arguments:
ind: Integer to be converted. Must be in
range(2**n).
n: Integer representing total number of
bits / spin sites.
Returns:
v: Length n list of binary (0 or 1) values
to be interpretted as big-endian.
'''
assert(isinstance(ind, int))
assert(isinstance(n, int))
assert(n >= 1)
assert(ind in range(2**n))
v = [int(bit) for bit in format(ind, '0'+str(n)+'b')]
return v
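# Illustrative round trip for the two helpers above (added example): with
# 3 spins, the big-endian bit list [1, 0, 1] maps to index 5 and back again.
def _bitvector_round_trip_example():
    assert bitvector_to_index([1, 0, 1]) == 5
    assert index_to_bitvector(5, 3) == [1, 0, 1]
    # every index of a 3-spin register survives the round trip
    assert all(bitvector_to_index(index_to_bitvector(i, 3)) == i for i in range(2**3))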
def construct_local_pauli(nSpins, nMeas, axis):
'''
For a system with nSpins total spins, constructs the
operator corresponding to the Pauli operator along
the specified axis at the specified spin (nMeas).
Arguments:
nSpins: Integer representing total number of spin
sites.
nMeas: Integer representing the spin site where
the operator will act. Indexed from zero,
thus must be in range(nSpins).
axis: A list of the form [theta, phi] specifying
the axis of the Pauli operator's action.
Theta is the polar angle given by a float
in [0,pi] while phi is the azimuthal angle
given by a float in [0,2*pi).
Returns:
P: A two-dimensional sparse CSR-matrix of size
(2**nSpins x 2**nSpins) representing the
action of the local Pauli operator on the
full Hilbert space.
'''
assert(isinstance(nSpins, int))
assert(nSpins >= 1)
assert(isinstance(nMeas, int))
assert(nMeas in range(nSpins))
assert(len(axis)==2)
theta, phi = axis
# Define 2x2 pauli matrix for the chosen axis
sigma = np.array( [ [np.cos(theta), \
np.sin(theta)*np.exp(-1j*phi)], \
[np.sin(theta)*np.exp(1j*phi),\
-np.cos(theta)] ], dtype='complex')
# Define operator on full Hilbert space as a sparse
# COO-matrix
row = []
col = []
data = []
for ind in range(2**nSpins):
# Convert current index to a binary list. Find
# index for equivalent state with just the local
# spin nMeas flipped.
v = index_to_bitvector(ind,nSpins)
vFlip = v.copy()
vFlip[nMeas] = 1 - vFlip[nMeas]
indFlip = bitvector_to_index(vFlip)
# Using the 2x2 Pauli matrix sigma, add the
# contributions from these two states to the
# Pauli matrix on the full Hilbert space.
s_site = v[nMeas]
sFlip_site = vFlip[nMeas]
row.append(ind)
col.append(ind)
data.append(sigma[s_site,s_site])
row.append(ind)
col.append(indFlip)
data.append(sigma[s_site,sFlip_site])
P = sparse.coo_matrix((data, (row,col)), \
shape=(2**nSpins,2**nSpins), \
dtype='complex')
# Convert to sparse CSR-matrix
P = P.tocsr()
return P
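# Small sanity sketch (added example): for a 2-spin system, the local Pauli
# operator along the z-axis (theta=0, phi=0) at site 0 should act as sigma_z
# on the first (most significant) spin and as the identity on the second.
def _local_pauli_example():
    P = construct_local_pauli(2, 0, [0.0, 0.0])
    sigma_z = np.array([[1, 0], [0, -1]], dtype='complex')
    expected = np.kron(sigma_z, np.identity(2))
    assert np.allclose(P.toarray(), expected)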
def construct_local_projector(nSpins, nMeas, m, axis):
'''
For a system with nSpins total spins, constructs the
operator corresponding to the projection operator along
the specified axis at the specified spin (nMeas) onto
the subspace given by the value of m.
Arguments:
nSpins: Integer representing total number of spin
sites.
nMeas: Integer representing the spin site where
the operator will act. Indexed from zero,
thus must be in range(nSpins).
m: The integer 1 or 0. Corresponds to either
projecting on to the subspace of spin
aligned with the axis (0) or anti-aligned
with the axis (1).
axis: A list of the form [theta, phi] specifying
the axis of the projection operator's action.
Theta is the polar angle given by a float
in [0,pi] while phi is the azimuthal angle
given by a float in [0,2*pi).
Returns:
P: A two-dimensional sparse CSR-matrix of size
(2**nSpins x 2**nSpins) representing the
action of the local projection operator on
the full Hilbert space.
'''
assert(isinstance(nSpins, int))
assert(nSpins >= 1)
assert(isinstance(nMeas, int))
assert(nMeas in range(nSpins))
assert(m==0 or m==1)
assert(len(axis)==2)
# Set sign of projector.
s = 1 - 2*m
I = sparse.identity(2**nSpins,format='csr')
pauli = construct_local_pauli(nSpins,nMeas,axis)
P = .5*(I+s*pauli)
return P
def construct_global_projector(nSpins, pState, axisL):
'''
For a system with nSpins total spins, constructs the
projection operator onto the joint measurement outcome
specified by pState, with each spin measured along the
corresponding axis in axisL.
Arguments:
nSpins: Integer representing total number of spin
sites.
pState: A list of length nSpins consisting of the
integers 0 and 1 or None. Each integer denotes
the orientation of the spin along the
corresponding measurement axis. Zeros are
interpreted as spins aligned along the axis
while ones are anti-aligned. None indicates no
measurement is performed and must be matched
with a corresponding None axis. The returned
projection operator projects onto this state.
axisL: A list of length nSpins consisting of axes
where each axis is specified by the form
[theta, phi] or None. Theta is the polar angle
given by a float in [0,pi] while phi is the
azimuthal angle given by a float in [0,2*pi).
Each axis specifies the orientation of the
projection operator's action on the
corresponding spin. If no measurement is to
be performed on the spin, the corresponding
axis can be set to None. This must be matched
with a corresponding None in pState.
Returns:
P: A two-dimensional numpy-matrix of size
(2**nSpins x 2**nSpins) representing the
projection operator onto the state specified
by pState and axisL.
'''
assert(isinstance(nSpins, int))
assert(nSpins >= 1)
assert(len(pState)==nSpins)
assert(len(axisL)==nSpins)
assert(all([axis==None or len(axis)==2 for axis in axisL]))
assert(all([m==0 or m==1 or m==None for m in pState]))
P = 1
for m, axis in zip(pState, axisL):
try:
# Axis and projection specified
s = 1-2*m
theta, phi = axis
sigma = np.array( [ [np.cos(theta), \
np.sin(theta)*np.exp(-1j*phi)], \
[np.sin(theta)*np.exp(1j*phi),\
-np.cos(theta)] ], dtype='complex')
I = np.identity(2)
t = .5*(np.identity(2)+s*sigma)
except TypeError:
# No projection specified
t = np.identity(2)
P = np.kron(P, t)
return P
def construct_complete_projector_list(nSpins, axisL):
'''
For a system with nSpins total spins, constructs all
possible projection operators corresponding to possible
measurement outcomes along the list of axes provided
(axisL). The list of returned projection operators is
complete.
Arguments:
nSpins: Integer representing total number of spin
sites.
axisL: A list of length nSpins consisting of axes
where each axis is specified by the form
[theta, phi] or None. Theta is the polar angle
given by a float in [0,pi] while phi is the
azimuthal angle given by a float in [0,2*pi).
Each axis specifies the orientation of the
projection operator's action on the
corresponding spin. If no measurement is to
be performed on the spin, the corresponding
axis can be set to None.
Returns:
Plist: A list of two-dimensional numpy-matrices each
of size (2**nSpins x 2**nSpins) representing
a projection operator onto a fine-grained
state.
'''
assert(isinstance(nSpins, int))
assert(nSpins >= 1)
assert(len(axisL)==nSpins)
assert(all([axis==None or len(axis)==2 for axis in axisL]))
# List possible projective measurements for each spin
mList = [ [None] if axis==None else [0,1] for axis in axisL]
# Generate all possible combinations of projective
# measurements
stateList = list(itertools.product(*mList))
# Generate complete list of projectors
Plist = []
for pState in stateList:
P = construct_global_projector(nSpins, pState, axisL)
Plist.append(P)
return Plist
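# Completeness sketch (added example): measuring both spins of a 2-spin
# system along the z-axis yields four projectors whose sum is the identity
# on the full Hilbert space.
def _projector_completeness_example():
    axisL = [[0.0, 0.0], [0.0, 0.0]]   # z-axis measurement on both spins
    Plist = construct_complete_projector_list(2, axisL)
    assert len(Plist) == 4
    assert np.allclose(sum(Plist), np.identity(4))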
def transverse_field_ising_hamiltonian(nSpins, h, g, J):
'''
For a system with nSpins total spins, constructs the
Hamiltonian for the transverse-field Ising model. The
strength of nearest-neighbor interactions is set by J,
while h is the transverse field along the z-axis, and
g is the longitudinal field along the x-axis.
Hamiltonian can be written as,
H = - J sum_{i} \sigma^{z}_{i} \sigma^{z}_{i+1}
- h sum_{i} \sigma^{z}_{i}
- g sum_{i} \sigma^{x}_{i}
Arguments:
nSpins: Integer representing total number of spin
sites.
h: Float giving the strength of the external
field in the z-axis.
g: Float giving the strength of the external
field in the x-axis.
J: Float giving the strength of nearest-
neighbor interactions.
Returns:
H: A two-dimensional sparse CSR-matrix of size
(2**nSpins x 2**nSpins) representing the
Hamiltonian of the system.
'''
assert(isinstance(nSpins, int))
assert(nSpins >= 1)
# Define the Hamiltonian as a COO-sparse matrix
row = []
col = []
data = []
for ind in range(2**nSpins):
v = index_to_bitvector(ind,nSpins)
# For given spin configuration, calculate the
# contributions from the transverse field and
# spin-spin interactions
nUp = sum([s==0 for s in v])
c = sum([(-1)**(v[i]+v[i+1]) for i in range(nSpins-1)])
row.append(ind)
col.append(ind)
data.append(-h*(2*nUp-nSpins)-J*c)
for n in range(nSpins):
# Flip a single spin from the current
# spin configuration
vFlip = v.copy()
vFlip[n] = 1 - vFlip[n]
indFlip = bitvector_to_index(vFlip)
# Calculate the coupling from the
# longitudinal field.
row.append(ind)
col.append(indFlip)
data.append(-g)
H = sparse.coo_matrix((data, (row,col)), \
shape=(2**nSpins,2**nSpins), \
dtype='float')
# Convert to sparse CSR-matrix
H = H.tocsr()
return H
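# Minimal check (added example): for a single spin the Hamiltonian reduces to
# H = -h*sigma_z - g*sigma_x, whose eigenvalues are +/- sqrt(h**2 + g**2).
def _tfim_single_spin_example(h=0.3, g=0.7):
    H = transverse_field_ising_hamiltonian(1, h, g, 0.0)
    eigs = np.linalg.eigvalsh(H.toarray())
    expected = np.sqrt(h ** 2 + g ** 2)
    assert np.allclose(eigs, [-expected, expected])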
def power_law_ising_hamiltonian(nSpins, h, g, J, lmax, zeta):
'''
For a system with nSpins total spins, constructs the
Hamiltonian for the power-law Ising model. h
specifies the transverse field along the z-axis at each
site, and g is the longitudinal field along the
x-axis at each site. If either of these is given as
a scalar instead of a list, the field is assumed to be
constant. The strength of the spin-spin interaction is
set by J. lmax sets the maximum distance of the
interaction (measured in spin sites) while zeta
specifies the power at which the interaction decays.
Hamiltonian can be written as,
H = - sum_{l=1}^{lmax} sum_{i=1}^{nSpins-l}
(J/l**zeta) * \sigma^{z}_{i} \sigma^{z}_{i+l}
- sum_{i} h_{i} \sigma^{z}_{i}
- sum_{i} g_{i} \sigma^{x}_{i}
Arguments:
nSpins: Integer representing total number of spin
sites.
h: List of length nSpins consisting of floats
giving the strength of the external field in
the z-axis at each spin site. Can also be
provided as a single float if the field is
uniform.
g: List of length nSpins consisting of floats
giving the strength of the external field in
the x-axis at each spin site. Can also be
provided as a single float if the field is
uniform.
J: Float giving the strength of spin-spin
interactions.
lmax: Maximum distance of the spin-spin interaction
measured in number of lattice sites. Must be
an integer between 0 (no interaction) and
nSpins-1 (all pair-wise interactions).
zeta: Non-negative float describing the power at
which spin-spin interactions decay with
distance. Scaling is of the form 1 / l**zeta.
Returns:
H: A two-dimensional sparse CSR-matrix of size
(2**nSpins x 2**nSpins) representing the
Hamiltonian of the system.
'''
assert(isinstance(nSpins, int))
assert(nSpins >= 1)
assert(isinstance(lmax, int))
assert(0<=lmax and lmax<=nSpins-1)
assert(zeta >= 0)
# Define the Hamiltonian as a COO-sparse matrix
row = []
col = []
data = []
for ind in range(2**nSpins):
v = index_to_bitvector(ind,nSpins)
# Contribution from spin-spin interactions
Hss = J*sum([ sum([(-1)**(v[i]+v[i+l])/(l**zeta) for i in range(nSpins-l)])
for l in range(1,lmax+1)])
try:
# Calculate transverse field contribution
Hz = sum([h[i]*(1-2*s) for i,s in enumerate(v)])
except TypeError:
# Case where transverse field is uniform
Hz = h*sum([(1-2*s) for s in v])
row.append(ind)
col.append(ind)
data.append(-Hz-Hss)
for n in range(nSpins):
# Flip a single spin from the current
# spin configuration
vFlip = v.copy()
vFlip[n] = 1 - vFlip[n]
indFlip = bitvector_to_index(vFlip)
row.append(ind)
col.append(indFlip)
try:
# Add longitudinal field contribution
Hx = g[n]
except TypeError:
# Case where longitudinal field is uniform
Hx = g
data.append(-Hx)
H = sparse.coo_matrix((data, (row,col)), \
shape=(2**nSpins,2**nSpins), \
dtype='float')
# Convert to sparse CSR-matrix
H = H.tocsr()
return H
def get_U(Lambda, Q, t, hbar=1):
'''
Calculates the time-evolution operator for a system
over a period t given the eigenvalue decomposition
of the Hamiltonian as Lambda, Q.
Arguments:
Lambda: One dimensional numpy array of the
eigenvalues of the Hamiltonian.
Q: Numpy matrix consisting of eigenvectors
of the Hamiltonian. The column Q[:, i]
is the normalized eigenvector
corresponding to the eigenvalue
Lambda[i].
t: Float giving the time of the evolution.
hbar: Positive float giving the value of the
reduced Planck constant. By default,
units chosen so that hbar = 1.
Returns:
U: Two-dimensional numpy matrix giving the
time-evolution operator for the system.
'''
assert(hbar>0)
powers = np.exp(-1j*Lambda*t/hbar)
U = Q @ sparse.diags(powers) @ Q.getH()
return U
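# --- Illustrative example (added; not part of the original source) ---------
# A minimal sketch of building the time-evolution operator from a dense
# eigendecomposition. get_U expects Q to expose .getH(), so a numpy matrix
# is used here; all parameter values are arbitrary examples.
def _demo_time_evolution(t=1.0):
    H = transverse_field_ising_hamiltonian(nSpins=3, h=0.3, g=0.1, J=1.0)
    Lambda, Qarr = np.linalg.eigh(H.toarray())
    Q = np.matrix(Qarr)
    U = get_U(Lambda, Q, t)
    # Sanity check: U should be (numerically) unitary.
    Uarr = np.asarray(U)
    assert np.allclose(Uarr @ Uarr.conj().T, np.eye(2**3), atol=1e-10)
    return U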
def thermal_density_matrix(Lambda, Q, beta):
'''
Calculates the thermal density matrix for a system
at inverse temperature beta given the eigenvalue
decomposition of the Hamiltonian as Lambda, Q.
Arguments:
Lambda: One dimensional numpy array of the
eigenvalues of the Hamiltonian.
Q: Numpy matrix consisting of eigenvectors
of the Hamiltonian. The column Q[:, i]
is the normalized eigenvector
corresponding to the eigenvalue
Lambda[i].
beta: Non-negative float or np.inf giving the
inverse temperature of the system in
units of inverse energy.
Returns:
rho: Two-dimensional numpy matrix giving the
density matrix for the system.
'''
assert(beta>=0)
if beta != np.inf:
# Temperature is non-zero, all states have
# support. Subtract off smallest eigenvalue to
# improve performance at very low temperatures.
powers = np.exp(-(Lambda-np.min(Lambda))*beta)
D = powers / np.sum(powers)
rho = Q @ sparse.diags(D) @ Q.getH()
else:
# Temperature is zero. Only ground state
# has support.
G = [1 if l==np.min(Lambda) else 0 for l in Lambda]
D = G / np.sum(G)
rho = Q @ sparse.diags(D) @ Q.getH()
return rho
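# --- Illustrative example (added; not part of the original source) ---------
# A minimal sketch of preparing a thermal (Gibbs) state at inverse
# temperature beta from the same kind of eigendecomposition; beta and the
# model parameters are arbitrary examples.
def _demo_thermal_state(beta=2.0):
    H = transverse_field_ising_hamiltonian(nSpins=3, h=0.3, g=0.1, J=1.0)
    Lambda, Qarr = np.linalg.eigh(H.toarray())
    Q = np.matrix(Qarr)
    rho = thermal_density_matrix(Lambda, Q, beta)
    # Sanity check: a density matrix has unit trace.
    assert abs(np.trace(np.asarray(rho)) - 1.0) < 1e-10
    return rho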
def calculate_A(V1, W2, V2, W3):
'''
Calculates the quasiprobability A(V1, W2, V2, W3)
given projection operators V1, V2, W2, and W3.
W2 and W3 are given in the Heisenberg picture.
Arguments:
V1: A two-dimensional sparse CSR-matrix
representing the action of a local
projection operator.
W2: A two-dimensional numpy matrix
representing the action of a local
projection operator. For time
evolving systems, must be given in
the Heisenberg picture.
V2: A two-dimensional sparse CSR-matrix
representing the action of a local
projection operator.
W3: A two-dimensional numpy matrix
representing the action of a local
projection operator. For time
evolving systems, must be given in
the Heisenberg picture.
Returns:
A: Complex float giving the evaluated
quasiprobability.
'''
A = np.trace(W3 @ V2 @ W2 @ V1)
return A
def epsilon_smooth_distribution(dist, eps, alpha):
'''
Heuristically smooths a probability distribution
(dist) so as to minimize its max-entropy or
maximize its min-entropy. The order of the Renyi
entropy to be smoothed is given by alpha. alpha
cannot be equal to one. eps gives the maximum
distance to the smoothed distribution in Total
Variation. This smoothing is only performed over
normalized distributions and does not consider
sub-normalized distributions.
Arguments:
dist: A one-dimensional numpy array giving a
normalized probability distribution
which is to be smoothed.
eps: Smoothing factor which gives the maximum
Total Variation distance from the initial
distribution to the smoothed distribution.
Must be a float between zero and one,
exclusive.
alpha: Order of the Renyi entropy to be
smoothed. Must be np.inf or a positive
float not equal to one.
Returns:
D: eps smoothed distribution.
'''
assert(abs(1-np.sum(dist))<.001)
assert(eps>0)
assert(eps<np.sum(dist))
# Create sorted copy of the distribution
D = dist.copy()
sortInds = np.argsort(D)
Dsorted = D[sortInds]
if alpha < 1:
# Max-type entropy to minimize.
# Find the largest collection of
# low probability elements which
# sum to less than eps.
cumulDist = np.cumsum(Dsorted)
inds = np.flatnonzero(cumulDist < eps)
# Remove as much weight as possible
# from these elements
freeWeight = np.sum(Dsorted[inds])
Dsorted[inds] = 0
try:
ind = inds[-1]+1
except IndexError:
# No element was smaller than eps
ind = 0
Dsorted[ind] -= eps - freeWeight
# Put all removed weight on the
# maximum probability element
Dsorted[-1] += eps
elif alpha > 1:
# Min-type entropy to maximize.
# Flatten tops of the distribution
trimWeight = np.array([[ind, np.sum(Dsorted[ind:]-floor)] \
for ind,floor in enumerate(Dsorted)])
trim = trimWeight[(trimWeight[:,1]<eps)]
try:
# Find largest collection of peaks which can
# be completely flattened by removing at most
# eps weight.
ind, freedWeight = int(trim[0,0]), trim[0,1]
Dsorted[ind:] = Dsorted[ind]
# Remove remaining allowance of weight from the
# flat top of the distribution
delta = (eps-freedWeight) / (len(Dsorted)-ind)
Dsorted[ind:] -= delta
except IndexError:
# No set of peaks can be flattened by removing at
# most eps weight. Remove eps weight from the
# largest peak.
ind, freedWeight = len(Dsorted)-1, 0
Dsorted[ind] = Dsorted[ind] - eps
# Raise bottoms of the distribution
fillWeight = np.array([[ind, np.sum(ceil-Dsorted[:ind+1])] \
for ind, ceil in enumerate(Dsorted)])
filled = fillWeight[(fillWeight[:,1]<eps)]
ind, spentWeight = int(filled[-1,0]), filled[-1,1]
Dsorted[:ind+1] = Dsorted[ind]
delta = (eps-spentWeight) / (1+ind)
Dsorted[:ind+1] += delta
else:
# Smoothing isn't defined for alpha == 1.
raise ValueError("Smoothing not defined for alpha=1")
# Rearrange elements of the smoothed sorted distribution
D[sortInds] = Dsorted
return D
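# --- Illustrative example (added; not part of the original source) ---------
# A minimal sketch of the smoothing routine on a toy distribution, once for
# a min-type entropy (alpha > 1) and once for a max-type entropy (alpha < 1).
def _demo_smoothing(eps=0.05):
    dist = np.array([0.5, 0.3, 0.15, 0.05])
    d_for_min = epsilon_smooth_distribution(dist, eps, alpha=np.inf)
    d_for_max = epsilon_smooth_distribution(dist, eps, alpha=0.5)
    # Both results remain normalized probability distributions.
    assert abs(np.sum(d_for_min) - 1.0) < 1e-9
    assert abs(np.sum(d_for_max) - 1.0) < 1e-9
    return d_for_min, d_for_max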
def calculate_renyi_entropy(rho, alpha, eps=0):
'''
Calculates the base-2 Renyi entropy of order alpha
for the density matrix rho. Rho may be specified
either as a full matrix or, if diagonal, it can
be specified by a one-dimensional array consisting
of the diagonal elements. If a non-zero value of
eps is given, approximates the epsilon-smoothed
Renyi entropy using a heuristic smoothing. eps
gives the maximum distance to the smoothed
distribution in Total Variation.
Arguments:
rho: Numpy array giving the density matrix.
Can either be a two dimensional array
fully specifying the matrix or, if the
matrix is diagonal, rho can be a one-
dimensional array giving the diagonal
entries.
alpha: Order of the Renyi entropy to be
calculated. Must be np.inf or a positive
float not equal to one.
eps: Smoothing factor which gives the maximum
Total Variation distance from the initial
distribution to the smoothed distribution.
Must be a non-negative float less than one.
Returns:
H: The order alpha Renyi entropy of rho as
a float.
'''
assert(alpha>=0)
assert(alpha!=1)
if rho.ndim == 1:
# Density matrices are non-negative.
# Absolute value is to prevent
# sign errors stemming from floating
# point error for values near zero.
D = np.abs(rho.copy())
else:
D, _ = linalg.eigh(rho)
D = np.abs(D)
if eps > 0:
D = epsilon_smooth_distribution(D, eps, alpha)
if alpha == np.inf:
H = -np.log2(np.max(D))
else:
# Pull out factor of largest eigenvalue to
# improve numerical performance for very
# large values of alpha
H = np.log2(np.max(D))*alpha/(1-alpha) \
+ np.log2(np.sum((D/np.max(D))**alpha))/(1-alpha)
return H
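# --- Illustrative example (added; not part of the original source) ---------
# A minimal sketch of evaluating Renyi entropies of a diagonal density
# matrix. For the maximally mixed single-qubit state every order gives
# exactly one bit.
def _demo_renyi_entropy():
    rho_diag = np.array([0.5, 0.5])
    h_min = calculate_renyi_entropy(rho_diag, alpha=np.inf)  # min-entropy
    h_2 = calculate_renyi_entropy(rho_diag, alpha=2)         # collision entropy
    assert abs(h_min - 1.0) < 1e-12 and abs(h_2 - 1.0) < 1e-12
    return h_min, h_2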
def calculate_neumann_entropy(rho):
'''
Calculates the base-2 von Neumann entropy of the
density matrix rho. Rho may be specified either
as a full matrix, or if diagonal it can be
specified by a one-dimensional array consisting
of the diagonal elements.
Arguments:
rho: Numpy array giving the density matrix.
Can either be a two dimensional array
fully specifying the matrix or, if the
matrix is diagonal, rho can be a one-
dimensional array giving the diagonal
entries.
Returns:
H: The von Neumann entropy of rho as a
float.
'''
if rho.ndim == 1:
# Density matrices are non-negative.
# Absolute value is to prevent
# sign errors stemming from floating
# point error for values near zero.
D = np.abs(rho.copy())
else:
D, _ = linalg.eigh(rho)
D = np.abs(D)
H = -np.sum(special.xlogy(D,D))/np.log(2)
return H
def weak_measurement_couplings(x0, Delta, gtilde, xList, hbar=1):
'''
Gives the weak measurement couplings for a
system where the measurement is coupled to a
particle's position and the particle is
initialized in a momentum-space wavepacket.
Couplings are calculated from the density
functions using the simple midpoint rule where
the midpoints are the xList values.
Arguments:
x0: Reference position used to define
the interaction between the weakly
coupled particle and the system. The
interaction is governed by:
V_int = exp(-i*gtilde*(x-x0)*(\Pi^V)/hbar)
Delta: Strictly positive float giving the
spread of the readout particle's
wavepacket in momentum space.
gtilde: Float giving the coupling parameter
between the readout particle and the
system.
xList: A one-dimensional numpy array of
length N that lists the possible
positions at which the readout
particle may be observed.
hbar: Positive float giving the value of the
reduced Planck constant. By default,
units chosen so that hbar = 1.
Returns:
pgList: A two-dimensional numpy array with
shape (N x 2) and columns [px,gx]
listing the values for the weak
measurement Kraus operators.
'''
assert(Delta>0)
assert(hbar>0)
# Calculate value of density functions
pDense = (Delta / (hbar * np.sqrt(np.pi))) \
*np.exp(-(Delta*xList/hbar)**2)
gDense = np.sqrt(pDense) \
*np.expm1(-1j*(xList-x0)*gtilde/hbar)
# Find the width associated with each xValue
xDiffs = np.diff(xList)
dx = np.concatenate([[xDiffs[0]],
.5*(xDiffs[:-1]+xDiffs[1:]),
[xDiffs[-1]]])
# Scale couplings densities by the interval widths
pdx = pDense * dx
gdx = gDense * np.sqrt(dx)
# pdx should be a nearly normalized
# probability distribution. In case of
# numerical errors, impose normalization
# by hand and appropriately scale gdx.
scaling = 1.0/np.sum(pdx)
p = pdx * scaling
g = gdx * np.sqrt(scaling)
pgList = np.stack([p,g], axis=1)
return pgList
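# --- Illustrative example (added; not part of the original source) ---------
# A minimal sketch of generating weak-measurement couplings on a coarse
# position grid; by construction the p-column sums to one. All values are
# arbitrary examples.
def _demo_weak_couplings():
    xList = np.linspace(-5.0, 5.0, 201)
    pgList = weak_measurement_couplings(x0=0.0, Delta=1.0, gtilde=0.1, xList=xList)
    assert abs(np.sum(pgList[:, 0]) - 1.0) < 1e-12
    return pgList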
def f_arg_W(V1, V2, pg1, pg2, W):
'''
For a specified strong measurement W, calculates
the minimum over the remaining parameters of the
argument which appears on the RHS of the
inequality.
Arguments:
V1: A two-dimensional sparse CSR-matrix
representing the action of a local
projection operator. Corresponds to a
weak measurement.
V2: A two-dimensional sparse CSR-matrix
representing the action of a local
projection operator. Corresponds to a
weak measurement.
pg1: A two-dimensional numpy array with
shape (N x 2) and columns [px,gx]
listing the values for the weak
measurement Kraus operator V1.
pg2: A two-dimensional numpy array with
shape (N x 2) and columns [px,gx]
listing the values for the weak
measurement Kraus operator V2.
W: A two-dimensional numpy matrix
representing the action of a
projection operator. Corresponds to a
strong measurement. For time
evolving systems, must be given in
the Heisenberg picture.
Returns:
a: Float giving the minimum value of the
argument for the specified strong
measurement W.
aOrd: Numpy array [LO, NLO, NNLO] giving
the contributions to the argument by
order in the small parameter g.
aTerm: Numpy array giving the contributions
to the argument by each term on the RHS
of the inequality.
qProbs: Numpy array giving the quasiprobabilities
[A1213, A2223, A1223].
'''
TrW = np.trace(W)
# Check if V1 and V2 are the same
if (V1!=V2).nnz==0:
# V1 == V2
TrWV1 = np.trace(W @ V1)
TrWV2 = TrWV1
A1213 = calculate_A(V1, W, V1, W)
A2223 = A1213
A1223 = A1213
dV1V2 = 1
else:
# V1 != V2
TrWV1 = np.trace(W @ V1)
TrWV2 = np.trace(W @ V2)
A1213 = calculate_A(V1, W, V1, W)
A2223 = calculate_A(V2, W, V2, W)
A1223 = calculate_A(V1, W, V2, W)
dV1V2 = 0
# Initialize the running minimum and associated
# decompositions of the RHS.
a = np.inf
aOrd = np.array([])
aTerm = np.array([])
qProbs = np.array([A1213, A2223, A1223])
# Need to consider all possible valid
# combinations of weak measurement parameters.
for p1,g1 in pg1:
p2 = pg2[:,0]
g2 = pg2[:,1]
# Calculate leading order term
t1 = np.real( -np.log2(p1*p2*TrW) )
LO = t1
# Calculate order g terms
t2 = np.real( -2*np.real(g1)*TrWV1/(np.sqrt(p1)*TrW*np.log(2)) )
t3 = np.real( -2*np.real(g2)*TrWV2/(np.sqrt(p2)*TrW*np.log(2)) )
NLO = t2 + t3
# Calculate order g**2 terms
t4 = np.real( -(np.abs(g1)**2)*A1213/(p1*TrW*np.log(2)) )
t5 = np.real( -(np.abs(g2)**2)*A2223/(p2*TrW*np.log(2)) )
t6 = np.real( -2*np.real(g1*g2*A1223)/(np.sqrt(p1*p2)*TrW*np.log(2)) )
t7 = np.real( -2*np.real(g1*np.conj(g2)*TrWV1)*dV1V2/(np.sqrt(p1*p2)*TrW*np.log(2)) )
t8 = np.real( 2*((np.real(g1)*TrWV1/(np.sqrt(p1)*TrW))**2)/np.log(2) )
t9 = np.real( 4*(np.real(g1)*TrWV1/(np.sqrt(p1)*TrW)) \
*(np.real(g2)*TrWV2/(np.sqrt(p2)*TrW))/np.log(2) )
t10 = np.real( 2*((np.real(g2)*TrWV2/(np.sqrt(p2)*TrW))**2)/np.log(2) )
NNLO = t4 + t5 + t6 + t7 + t8 + t9 + t10
# Sum all contributions
total = LO + NLO + NNLO
# Check if this contains a new minimum
if np.min(total) <= a:
indMin = np.argmin(total)
a = total[indMin]
aOrd = np.array([LO[indMin], NLO[indMin], NNLO[indMin]])
aTerm = np.array([t1[indMin], t2, t3[indMin], t4, t5[indMin], \
t6[indMin], t7[indMin], t8, t9[indMin], t10[indMin]])
return a, aOrd, aTerm, qProbs
def RHS(V1, V2, pg1, pg2, Wlist):
'''
For a list of strong measurements (Wlist), and
weak measurements V1 and V2 with lists of Kraus
operator parameters pg1 and pg2, calculates the
RHS of the inequality.
Arguments:
V1: A two-dimensional sparse CSR-matrix
representing the action of a local
projection operator. Corresponds to a
weak measurement.
V2: A two-dimensional sparse CSR-matrix
representing the action of a local
projection operator. Corresponds to a
weak measurement.
pg1: A two-dimensional numpy array with
shape (N x 2) and columns [px,gx]
listing the values for the weak
measurement Kraus operator V1.
pg2: A two-dimensional numpy array with
shape (N x 2) and columns [px,gx]
listing the values for the weak
measurement Kraus operator V2.
Wlist: A list of two-dimensional numpy matrices
each representing the action of a
projection operator. Each matrix
corresponds to a strong measurement. For
time evolving systems, must be given in
the Heisenberg picture.
Returns:
f: Float giving the RHS of the inequality.
fOrd: One-dimensional numpy array [LO, NLO, NNLO]
giving the contributions to f by order in
the small parameter g.
fTerm: Numpy array giving the contributions
to f by each term on the RHS of the
inequality.
qProbs: Numpy array giving the quasiprobabilities
[A1213, A2223, A1223].
'''
# For each W in Wlist, find the minimum of the
# argument on the RHS of the inequality
argList = []
argListByOrder = []
argListByTerm = []
argListQuasi = []
for W in Wlist:
aW, aWOrd, aWTerm, aQProbs = f_arg_W(V1, V2, pg1, pg2, W)
argList.append(aW)
argListByOrder.append(aWOrd)
argListByTerm.append(aWTerm)
argListQuasi.append(aQProbs)
# Find the minimum argument over choice of W
indMin = np.argmin(argList)
f = argList[indMin]
fOrd = argListByOrder[indMin]
fTerm = argListByTerm[indMin]
qProbs = argListQuasi[indMin]
return f, fOrd, fTerm, qProbs
def f_arg_strong(V1, V2, pg1, pg2, W2, W3):
'''
For specified strong measurements W2 and W3,
calculates the minimum over the remaining
parameters of the argument which appears on
the RHS of the inequality.
Arguments:
V1: A two-dimensional sparse CSR-matrix
representing the action of a local
projection operator. Corresponds to a
weak measurement.
V2: A two-dimensional sparse CSR-matrix
representing the action of a local
projection operator. Corresponds to a
weak measurement.
pg1: A two-dimensional numpy array with
shape (N x 2) and columns [px,gx]
listing the values for the weak
measurement Kraus operator V1.
pg2: A two-dimensional numpy array with
shape (N x 2) and columns [px,gx]
listing the values for the weak
measurement Kraus operator V2.
W2: A two-dimensional numpy matrix
representing the action of a
projection operator. Corresponds to a
strong measurement. For time
evolving systems, must be given in
the Heisenberg picture.
W3: A two-dimensional numpy matrix
representing the action of a
projection operator. Corresponds to a
strong measurement. For time
evolving systems, must be given in
the Heisenberg picture.
Returns:
f: Float giving the minimum value of the
RHS for the specified strong
measurements W2 and W3.
aOrd: Numpy array [LO, NLO, N2LO, N3LO, N4LO]
giving the contributions to the argument
by order in the parameter g.
aTerm: Numpy array giving the contributions
to the argument by each term on the RHS
of the inequality.
qProbs: Numpy array giving the quasiprobabilities
[A1213, A2223, A1223].
'''
# Check V1 == V2 and W2 == W3
if (V1!=V2).nnz==0 and np.array_equal(W2,W3):
# V1 == V2 and W2 == W3
dV1V2 = 1
dW2W3 = 1
TrW = np.trace(W2)
TrWV1 = np.trace(W2 @ V1)
TrWV2 = TrWV1
A1213 = calculate_A(V1, W2, V1, W2)
A2223 = A1213
A1223 = A1213
elif (V1!=V2).nnz==0 and not np.array_equal(W2,W3):
# V1 == V2 and W2 != W3
dV1V2 = 1
dW2W3 = 0
TrW = 0
TrWV1 = 0
TrWV2 = 0
A1213 = calculate_A(V1, W2, V1, W3)
A2223 = A1213
A1223 = A1213
elif (V1!=V2).nnz!=0 and np.array_equal(W2,W3):
# V1 != V2 and W2 == W3
dV1V2 = 0
dW2W3 = 1
TrW = np.trace(W2)
TrWV1 = np.trace(W2 @ V1)
TrWV2 = np.trace(W2 @ V2)
A1213 = calculate_A(V1, W2, V1, W2)
A2223 = calculate_A(V2, W2, V2, W2)
A1223 = calculate_A(V1, W2, V2, W2)
else:
# V1 != V2 and W2 != W3
dV1V2 = 0
dW2W3 = 0
TrW = 0
TrWV1 = 0
TrWV2 = 0
A1213 = calculate_A(V1, W2, V1, W3)
A2223 = calculate_A(V2, W2, V2, W3)
A1223 = calculate_A(V1, W2, V2, W3)
# Initialize the running minimum and associated
# decompositions of the argument.
f = np.inf
aOrd = np.array([])
aTerm = np.array([])
qProbs = np.array([A1213, A2223, A1223])
# Need to consider all possible valid
# combinations of weak measurement parameters.
for p1,g1 in pg1:
p2 = pg2[:,0]
g2 = pg2[:,1]
# Calculate leading order term
t1 = np.real(p1*p2*TrW*dW2W3)
LO = t1
# Calculate order g terms
t2 = np.real(2*np.sqrt(p1)*p2*np.real(g1)*TrWV1*dW2W3)
t3 = np.real(2*p1*np.sqrt(p2)*np.real(g2)*TrWV2*dW2W3)
NLO = t2 + t3
# Calculate order g**2 terms
t4 = np.real(p2*(np.abs(g1)**2)*A1213)
t5 = np.real(p1*(np.abs(g2)**2)*A2223)
t6 = np.real(2*np.sqrt(p1*p2)*np.real(g1*g2*A1223))
t7 = np.real(2*np.sqrt(p1*p2)*np.real(g1*np.conj(g2)) \
*TrWV1*dV1V2*dW2W3)
N2LO = t4 + t5 + t6 + t7
# Calculate order g**3 terms
t8 = np.real(2*np.sqrt(p2)*(np.abs(g1)**2)*np.real(g2*A1223) \
*dV1V2)
t9 = np.real(2*np.sqrt(p1)*(np.abs(g2)**2)*np.real(g1*A1223) \
             *dV1V2)
import tensorflow as tf
import numpy as np
import pymysql
import datetime
seq_length = 7
data_dim = 1
hidden_dim = 10
output_dim = 1
#num_layer = 3
# connect db
con=pymysql.connect(host='192.168.3.11',port=3306,user='root',password='<PASSWORD>!',db='abeekx',charset='utf8')
cursor=con.cursor()
# if new data input then start main
cursor.execute("SELECT MAX(id) FROM sensors") # check new data in
pre_max_id=int(cursor.fetchone()[0])
max_id = pre_max_id
while( max_id == pre_max_id ):
con.commit()
cursor.execute("SELECT MAX(id) FROM sensors")
max_id=int(cursor.fetchone()[0])
# main
cursor.execute("SELECT temp,time FROM sensors")
xy=[]
time=[]
for row in cursor:
xy.append([float(row[0])])
time.append([str(row[1])]) # get timestamp
last_time=time[-1]
xy1 = xy # keep raw data before scaling (used later to invert the MinMax transform)
numerator = xy - np.min(xy, 0) # MinMax scaling
denominator = np.max(xy, 0) - np.min(xy, 0)
xy = (xy - np.min(xy, 0))/ (denominator + 1e-7)
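# Note (added comment): the statement above rescales every reading to [0, 1];
# xy1 keeps the raw values so a scaled value y_scaled can later be mapped back
# via y_raw = y_scaled*(max - min + 1e-7) + min, as done for testY below.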
x = xy
dataX = []
dataY = []
for i in range(0, len(x) - seq_length):
_x = x[i:i + seq_length]
_y = x[i+seq_length]
dataX.append(_x)
dataY.append(_y)
# train/test split
train_size = int(0.95*len(dataX))
trainX, testX = np.array(dataX[0:train_size]), np.array(dataX[train_size:len(dataX)])
trainY, testY = np.array(dataY[0:train_size]), np.array(dataY[train_size:len(dataY)])
timeY = np.array(time[train_size:len(dataY)])
X = tf.placeholder(tf.float32, [None, seq_length, data_dim])
Y = tf.placeholder(tf.float32, [None, data_dim])
# build a LSTM network
cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
'''Multi Layer LSTM network
cells = []
for _ in range(num_layers):
cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=0.8)
cells.append(cell)
cell = tf.contrib.rnn.MultiRNNCell(cells,state_is_tuple=True)
'''
outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
Y_pred = tf.contrib.layers.fully_connected(outputs[:,-1], output_dim, activation_fn=None)
targets = tf.placeholder(tf.float32, [None, data_dim])
predictions = tf.placeholder(tf.float32, [None, data_dim])
rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)))
sess=tf.Session()
saver=tf.train.Saver()
init = tf.global_variables_initializer()
sess.run(init)
save_path="./rnn_train.ckpt"
saver.restore(sess,save_path) # restore train variable
# Test step
test_predict = sess.run(Y_pred, feed_dict={X: testX})
# append five additional one-step-ahead predictions
temp_list=testX[-1]
temp_list=np.delete(temp_list, 0, axis=0)
temp_list= np.append(temp_list, xy[-1, 0].reshape(1, 1), axis=0)
test_predict1 = sess.run(Y_pred, feed_dict={X: temp_list.reshape(1, 7, 1)})
temp_list=np.delete(temp_list, 0,axis=0)
temp_list= np.append(temp_list, test_predict[-1, 0].reshape(1, 1), axis=0)
test_predict2 = sess.run(Y_pred, feed_dict={X: temp_list.reshape(1, 7, 1)})
temp_list=np.delete(temp_list, 0,axis=0)
temp_list= np.append(temp_list, test_predict[-1, 0].reshape(1, 1), axis=0)
test_predict3 = sess.run(Y_pred, feed_dict={X: temp_list.reshape(1, 7, 1)})
temp_list=np.delete(temp_list, 0,axis=0)
temp_list= np.append(temp_list, test_predict[-1, 0].reshape(1, 1), axis=0)
test_predict4 = sess.run(Y_pred, feed_dict={X: temp_list.reshape(1, 7, 1)})
temp_list=np.delete(temp_list, 0,axis=0)
temp_list= np.append(temp_list, test_predict[-1, 0].reshape(1, 1), axis=0)
test_predict5 = sess.run(Y_pred, feed_dict={X: temp_list.reshape(1, 7, 1)})
rmse_val = sess.run(rmse, feed_dict={targets: testY, predictions: test_predict})
print("RMSE: {}".format(rmse_val))
test_predict=np.append(test_predict, test_predict1, axis=0)
test_predict=np.append(test_predict, test_predict2, axis=0)
test_predict=np.append(test_predict, test_predict3, axis=0)
test_predict=np.append(test_predict, test_predict4, axis=0)
test_predict=np.append(test_predict, test_predict5, axis=0)
testY = testY*( np.max(xy1, 0) - np.min(xy1, 0) + 1e-7) + np.min(xy1, 0)
"""Core API"""
from typing import Sequence, Union
import numpy as np
import torch
from opacus import GradSampleModule
from opacus.accountants import RDPAccountant
from opacus.accountants.utils import get_noise_multiplier
from opacus.optimizers import DPOptimizer
from opacus.privacy_engine import forbid_accumulation_hook
from opacus.utils.uniform_sampler import UniformWithReplacementSampler
from .data import NonUniformPoissonSampler, WeightedDataLoader
from .optimizers import DPSGDFOptimizer
from .utils import _data_loader_with_batch_sampler
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def reweigh(labels: Sequence[int]) -> np.ndarray:
"""Returns the inverse weighting for each sample in the dataset.
Args:
labels (Sequence[int]):
The relevant sensitive group labels of the dataset.
Returns:
np.ndarray:
The inverse weighting for each sample in the dataset.
Usage:
>>> reweigh([0, 1, 1, 2, 3])
array([0.25, 0.125, 0.125, 0.25, 0.25])
"""
labels = np.asarray(labels)  # ensure the comparisons below yield boolean masks
num_samples = len(labels)
sensitive_groups = []
for value, counts in zip(*np.unique(labels, return_counts=True)):
    sensitive_groups.append((labels == value, counts))
n_unique = len(sensitive_groups)
target_prob = 1 / n_unique
weights = np.zeros(num_samples)
for mask, counts in sensitive_groups:
weights[mask] = target_prob / counts
return weights
def latent_reweigh(
train_loader: torch.utils.data.DataLoader,
vae: torch.nn.Module,
alpha: float = 0.01,
k: int = 16,
) -> np.ndarray:
"""Returns the inverse weighting for each sample in the dataset computed
using the latent distributions.
Args:
train_loader (torch.utils.data.DataLoader):
The relevant training data loader.
vae (torch.nn.Module):
The relevant VAE model.
alpha (float):
The hyperparameter for the latent space. Defaults to 0.01.
k (int):
The number of samples to use for the latent space. Defaults to 16.
Returns:
np.ndarray:
The inverse weighting for each sample in the dataset.
"""
dataloader = torch.utils.data.DataLoader(
train_loader.dataset, batch_size=train_loader.batch_size, shuffle=False
)
mus = []
for batch in dataloader:
mu, _ = vae.encode(batch[0].to(device))
mus.append(mu.cpu().detach().numpy())
mu = np.concatenate(mus)
weights = np.zeros(mu.shape[0])
latent_dim = mu.shape[1]
for i in range(latent_dim):
hist, bin_edges = np.histogram(mu[:, i], density=True, bins=k)
bin_edges[0] = float("-inf")
bin_edges[-1] = float("inf")
hist += alpha
hist = hist / hist.sum()
bin_idxs = np.digitize(mu[:, i], bin_edges)
p = 1.0 / hist[bin_idxs - 1]
p /= p.sum()
weights = np.maximum(weights, p)
weights /= weights.sum()
return weights
def setup_weighted_dpsgd(
data_loader: torch.utils.data.DataLoader,
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
weights: Union[Sequence[float], np.ndarray],
target_epsilon: float,
target_delta: float,
max_grad_norm: float,
epochs: int,
):
"""Sets up the DP-SGD-W optimizer.
Args:
data_loader (torch.utils.data.DataLoader):
The training data loader.
model (torch.nn.Module):
The model to be used during training.
optimizer (torch.optim.Optimizer):
The optimizer to be used during training.
weights (Union[Sequence[float], np.ndarray]):
The weights for each sample in the dataset.
target_epsilon (float):
The target epsilon for DP-SGD-W.
target_delta (float):
The target delta for DP-SGD-W.
max_grad_norm (float):
    The gradient clipping bound for DP-SGD-W.
epochs (int):
    The number of training epochs, used when computing the noise multiplier.
"""
weights = np.array(weights)
model = GradSampleModule(model)
model.register_forward_pre_hook(forbid_accumulation_hook)
N = len(data_loader.dataset)
sample_rate = 1 / len(data_loader)
max_sample_rate = np.max(weights) * N * sample_rate
expected_batch_size = int(N * sample_rate)
batch_sampler = NonUniformPoissonSampler(
weights=weights, num_samples=N, sample_rate=sample_rate
)
dp_loader = _data_loader_with_batch_sampler(data_loader, batch_sampler, wrap=False)
accountant = RDPAccountant()
optimizer = DPOptimizer(
optimizer=optimizer,
noise_multiplier=get_noise_multiplier(
target_epsilon=target_epsilon,
target_delta=target_delta,
sample_rate=max_sample_rate,
steps=int(epochs / sample_rate),
accountant=accountant.mechanism(),
),
max_grad_norm=max_grad_norm,
expected_batch_size=expected_batch_size,
)
optimizer.attach_step_hook(
accountant.get_optimizer_hook_fn(sample_rate=sample_rate)
)
return dp_loader, model, optimizer, accountant
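# --- Illustrative usage sketch (added; not part of the original module) ----
# ``model``, ``train_loader`` and ``train_labels`` are caller-supplied
# placeholders; the hyperparameters below are arbitrary examples, not
# recommended values.
def _example_weighted_dpsgd_setup(model, train_loader, train_labels):
    weights = reweigh(train_labels)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    return setup_weighted_dpsgd(
        data_loader=train_loader,
        model=model,
        optimizer=optimizer,
        weights=weights,
        target_epsilon=3.0,
        target_delta=1e-5,
        max_grad_norm=1.0,
        epochs=10,
    )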
def setup_adaptive_clipped_dpsgd(
data_loader: torch.utils.data.DataLoader,
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
target_epsilon: float,
target_delta: float,
epochs: int,
clipping: str = "dpsgdf",
**kwargs
):
"""Sets up the DP-SGD-W optimizer.
Args:
data_loader (torch.utils.data.DataLoader):
The training data loader.
model (torch.nn.Module):
The model to be used during training.
optimizer (torch.optim.Optimizer):
The optimizer to be used during training.
target_epsilon (float):
    The target epsilon for the privacy accountant.
target_delta (float):
    The target delta for the privacy accountant.
epochs (int):
    The number of training epochs, used when computing the noise multiplier.
clipping (str):
The clipping method to use. Takes values ["dpsgdf", "fairdp"].
Defaults to "dpsgdf".
**kwargs:
Passed to the ``opacus.optimizers.DPOptimizer`` wrapper.
"""
model = GradSampleModule(model)
model.register_forward_pre_hook(forbid_accumulation_hook)
N = len(data_loader.dataset)
sample_rate = 1 / len(data_loader)
expected_batch_size = int(N * sample_rate)
batch_sampler = UniformWithReplacementSampler(
num_samples=N, sample_rate=sample_rate
)
dp_loader = _data_loader_with_batch_sampler(data_loader, batch_sampler, wrap=True)
accountant = RDPAccountant()
if clipping == "dpsgdf":
optimizer = DPSGDFOptimizer(
optimizer=optimizer,
noise_multiplier=get_noise_multiplier(
target_epsilon=target_epsilon,
target_delta=target_delta,
sample_rate=sample_rate,
steps=int(epochs / sample_rate),
accountant=accountant.mechanism(),
),
expected_batch_size=expected_batch_size,
**kwargs
)
else:
raise ValueError("``clipping`` must be one of ['dpsgdf', 'fairdp']")
optimizer.attach_step_hook(
accountant.get_optimizer_hook_fn(sample_rate=sample_rate)
)
return dp_loader, model, optimizer, accountant
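# --- Illustrative usage sketch (added; not part of the original module) ----
# ``model`` and ``train_loader`` are caller-supplied placeholders; the
# hyperparameters below are arbitrary examples, not recommended values.
def _example_adaptive_clipping_setup(model, train_loader):
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    return setup_adaptive_clipped_dpsgd(
        data_loader=train_loader,
        model=model,
        optimizer=optimizer,
        target_epsilon=3.0,
        target_delta=1e-5,
        epochs=10,
        clipping="dpsgdf",
    )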
def create_teacher_loaders(
dataset: torch.utils.data.Dataset, n_teachers: int, batch_size: int
) -> Sequence[torch.utils.data.DataLoader]:
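"""Splits ``dataset`` into ``n_teachers`` disjoint shards and returns one
shuffled DataLoader per shard (one per teacher)."""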
teacher_loaders = []
n_train = len(dataset)
shuffled_idxs = np.random.permutation(n_train)
size = n_train // n_teachers
for i in range(n_teachers):
idxs = shuffled_idxs[i * size : min((i + 1) * size, n_train)]
subset_data = torch.utils.data.Subset(dataset, idxs)
loader = torch.utils.data.DataLoader(
subset_data, batch_size=batch_size, shuffle=True
)
teacher_loaders.append(loader)
return teacher_loaders
def create_weighted_teacher_loaders(
dataset: torch.utils.data.Dataset,
n_teachers: int,
batch_size: int,
weights: Union[Sequence[float], np.ndarray, torch.Tensor],
) -> Sequence[WeightedDataLoader]:
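"""Weighted counterpart of ``create_teacher_loaders``: splits ``dataset``
into ``n_teachers`` disjoint shards and builds one WeightedDataLoader per
shard that samples according to the supplied per-sample ``weights``."""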
teacher_loaders = []
n_train = len(dataset)
shuffled_idxs = np.random.permutation(n_train)
# -*- coding: utf-8 -*-
import numpy as np
def parametric_eurocode1(A_t, A_f, A_v, h_eq, q_fd, lambda_, rho, c, t_lim, time_end=7200, time_step=1, time_start=0, time_padding = (0, 0),temperature_initial=293.15, is_more_return=False):
"""Function Description: (SI UNITS ONLY)
This function calculates the time-temperature curve according to Eurocode 1 part 1-2, Appendix A.
:param A_t: total surface area of the enclosure (walls, ceiling and floor, openings included) [m2]
:param A_f: floor area of the enclosure [m2]
:param A_v: total area of vertical openings [m2]
:param h_eq: weighted average height of the vertical openings [m]
:param q_fd: design fire load density related to the floor area [J/m2]
:param lambda_: thermal conductivity of the enclosure lining [W/m/K]
:param rho: density of the enclosure lining [kg/m3]
:param c: specific heat of the enclosure lining [J/kg/K]
:param t_lim: limiting time for the fuel-controlled fire regime [s]
:param time_end: end time of the output arrays [s]
:param time_step: time step of the output arrays [s]
:param time_start: start time of the output arrays [s]
:param time_padding: padding applied to the time axis (unused in the calculation below)
:param temperature_initial: initial (ambient) temperature [K]
:return t: time array [s]
:return T_g: gas temperature array [K]
"""
# Reference: Eurocode 1991-1-2; <NAME>, <NAME> (2010) - Fire Design of Steel Structures
# UNITS: SI -> Equations
q_fd /= 1e6 # [J/m2] -> [MJ/m2]
t_lim /= 3600 # [s] -> [hr]
time_end /= 3600 # [s] -> [hr]
time_step /= 3600 # [s] -> [hr]
time_start /= 3600 # [s] -> [hr]
temperature_initial -= 273.15 # [K] -> [C]
# ACQUIRING REQUIRED VARIABLES
t = np.arange(time_start, time_end, time_step, dtype=float)
b = (lambda_ * rho * c) ** 0.5
O = A_v * h_eq**0.5 / A_t
q_td = q_fd * A_f / A_t
Gamma = ((O/0.04)/(b/1160))**2
t_max = 0.0002*q_td/O
# check criteria
if not 50 <= q_td <= 1000:
print("q_td = {:4.1f} not in range [50, 1000]".format(q_td))
# CALCULATION
def _temperature_heating(t_star, temperature_initial):
# eq. 3.12
T_g = 1325 * (1 - 0.324*np.exp(-0.2*t_star) - 0.204*np.exp(-1.7*t_star) - 0.472*np.exp(-19*t_star))
T_g += temperature_initial
return T_g
def _temperature_cooling_vent(t_star_max, T_max, t_star): # ventilation controlled
# eq. 3.16
if t_star_max <= 0.5:
T_g = T_max - 625 * (t_star - t_star_max)
elif 0.5 < t_star_max < 2.0:
T_g = T_max - 250 * (3 - t_star_max) * (t_star - t_star_max)
elif 2.0 <= t_star_max:
T_g = T_max - 250 * (t_star - t_star_max)
else: T_g = np.nan
return T_g
def _temperature_cooling_fuel(t_star_max, T_max, t_star, Gamma, t_lim): # fuel controlled
# eq. 3.22
if t_star_max <= 0.5:
T_g = T_max - 625 * (t_star - Gamma * t_lim)
elif 0.5 < t_star_max < 2.0:
T_g = T_max - 250 * (3 - t_star_max) * (t_star - Gamma * t_lim)
elif 2.0 <= t_star_max:
T_g = T_max - 250 * (t_star - Gamma * t_lim)
else: T_g = np.nan
return T_g
def _variables(t, Gamma, t_max):
t_star = Gamma * t
t_star_max = Gamma * t_max
return t_star, t_star_max
def _variables_2(t, t_lim, q_td, b, O):
O_lim = 0.0001 * q_td / t_lim
Gamma_lim = ((O_lim/0.04)/(b/1160))**2
if O > 0.04 and q_td < 75 and b < 1160:
k = 1 + ((O-0.04)/(0.04)) * ((q_td-75)/(75)) * ((1160-b)/(1160))
Gamma_lim *= k
t_star_ = Gamma_lim * t
t_star_max_ = Gamma_lim * t_lim
return t_star_, t_star_max_
t_star, t_star_max = _variables(t, Gamma, t_max)
if t_max >= t_lim: # ventilation controlled fire
T_max = _temperature_heating(t_star_max, temperature_initial)
T_heating_g = _temperature_heating(Gamma * t, temperature_initial)
T_cooling_g = _temperature_cooling_vent(t_star_max, T_max, t_star)
fire_type = "ventilation controlled"
else: # fuel controlled fire
t_star_, t_star_max_ = _variables_2(t, t_lim, q_td, b, O)
T_max = _temperature_heating(t_star_max_, temperature_initial)
T_heating_g = _temperature_heating(t_star_, temperature_initial)
T_cooling_g = _temperature_cooling_fuel(t_star_max, T_max, t_star, Gamma, t_lim)
fire_type = "fuel controlled"
T_g = np.minimum(T_heating_g, T_cooling_g)
T_g[T_g < temperature_initial] = temperature_initial
data_all = {"fire_type": fire_type}
# UNITS: Eq. -> SI
t *= 3600
T_g += 273.15
if is_more_return:
return t, T_g, data_all
else:
return t, T_g
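# --- Illustrative example (added; not part of the original source) ---------
# A minimal sketch of calling the parametric fire curve for a small
# compartment. All input values are arbitrary but dimensionally consistent
# SI examples, not design values.
def _example_parametric_fire():
    t, T_g = parametric_eurocode1(
        A_t=360.0,          # total enclosure surface area [m2]
        A_f=100.0,          # floor area [m2]
        A_v=20.0,           # vertical opening area [m2]
        h_eq=2.0,           # weighted average opening height [m]
        q_fd=600e6,         # fire load density per floor area [J/m2]
        lambda_=1.0,        # lining thermal conductivity [W/m/K]
        rho=2000.0,         # lining density [kg/m3]
        c=1000.0,           # lining specific heat [J/kg/K]
        t_lim=20.0 * 60.0,  # limiting time [s]
    )
    return t, T_g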
def standard_fire_iso834(
time,
temperature_initial
):
# INPUTS CHECK
time = np.array(time, dtype=float)
time[time < 0] = np.nan
# SI UNITS -> EQUATION UNITS
temperature_initial -= 273.15 # [K] -> [C]
time /= 60. # [s] - [min]
# CALCULATE TEMPERATURE BASED ON GIVEN TIME
temperature = 345. * np.log10(time * 8. + 1.) + temperature_initial
temperature[np.isnan(temperature)] = temperature_initial
# EQUATION UNITS -> SI UNITS
time *= 60. # [min] -> [s]
temperature += 273.15 # [C] -> [K]
return time, temperature
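# --- Illustrative example (added; not part of the original source) ---------
# A minimal sketch of evaluating the ISO 834 standard fire curve over one
# hour at one-minute resolution, starting from ambient (293.15 K).
def _example_iso834():
    time = np.arange(0.0, 3600.0, 60.0)
    return standard_fire_iso834(time, temperature_initial=293.15)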
def standard_fire_astm_e119(
time,
temperature_ambient
):
time /= 3600. # convert from seconds to hours (the ASTM E119 approximation uses hours)
temperature_ambient -= 273.15 # convert temperature from kelvin to celsius
temperature = 750 * (1 - np.exp(-3.79553 * np.sqrt(time))) + 170.41 * np.sqrt(time) + temperature_ambient
return temperature + 273.15 # convert from celsius to kelvin (SI unit)
def hydrocarbon_eurocode(
time,
temperature_initial
):
time /= 60. # convert time from seconds to minutes (the Eurocode hydrocarbon curve uses minutes)
temperature_initial -= 273.15 # convert temperature from kelvin to celsius
temperature = 1080 * (1 - 0.325 * np.exp(-0.167 * time) - 0.675 * np.exp(-2.5 * time)) + temperature_initial
return temperature + 273.15
def external_fire_eurocode(
time,
temperature_initial
):
time /= 60. # convert time from seconds to minutes (the Eurocode external fire curve uses minutes)
temperature_initial -= 273.15 # convert ambient temperature from kelvin to celsius
temperature = 660 * (1 - 0.687 * np.exp(-0.32 * time) - 0.313 * np.exp(-3.8 * time)) + temperature_initial
return temperature + 273.15 # convert temperature from celsius to kelvin
def travelling_fire(
T_0,
q_fd,
RHRf,
l,
w,
s,
# A_v,
# h_eq,
h_s,
l_s,
temperature_max=1050,
time_ubound=10800,
time_step=1):
"""
:param T_0: [float][K] Initial temperature.
:param q_fd: [float][J m2] Fire load density.
:param RHRf: [float][W m2] Heat release rate density
:param l: [float][m] Compartment length
:param w: [float][m] Compartment width
:param s: [float][m/s] Fire spread speed
# :param A_v: [float][m2] Ventilation area
# :param h_eq: [float][m] Weighted ventilation height
:param h_s: [float][m] Vertical distance between element to fuel bed.
:param l_s: [float][m] Horizontal distance between element to fire front.
:param time_ubound: [float][s] Maximum time for the curve.
:param time_step: [float][s] Static time step.
:return time: [ndarray][s] An array representing time incorporating 'temperature'.
:return temperature: [ndarray][K] An array representing temperature incorporating 'time'.
"""
# SETTINGS
time_lbound = 0
# UNIT CONVERSION TO FIT EQUATIONS
T_0 -= 273.15
q_fd /= 1e6
RHRf /= 1e6
# MAKE TIME ARRAY
time = np.arange(time_lbound, time_ubound+time_step, time_step)
# fire_load_density_MJm2=900
# heat_release_rate_density_MWm2=0.15
# length_compartment_m=150
# width_compartment_m=17.4
# fire_spread_rate_ms=0.012
# area_ventilation_m2=190
# height_ventilation_opening_m=3.3
# height_fuel_to_element_m=3.5
# length_element_to_fire_origin_m=105
# work out burning time etc.
t_burn = max([q_fd / RHRf, 900.])
t_decay = max([t_burn, l / s])
t_lim = min([t_burn, l / s])
# reduce resolution to fit time step for t_burn, t_decay, t_lim
t_decay_ = round(t_decay/time_step, 0) * time_step
t_lim_ = round(t_lim/time_step, 0) * time_step
if t_decay_ == t_lim_: t_lim_ -= time_step
# work out the heat release rate array as a function of time
Q_growth = (RHRf * w * s * time) * (time < t_lim_)
Q_peak = min([RHRf * w * s * t_burn, RHRf * w * l]) * (time >= t_lim_) * (time <= t_decay_)
Q_decay = (max(Q_peak) - (time-t_decay_) * w * s * RHRf) * (time > t_decay_)
Q_decay[Q_decay < 0] = 0
Q = (Q_growth + Q_peak + Q_decay) * 1000.
# work out the distance r between the fire median and the structural element
l_fire_front = s * time
l_fire_front[l_fire_front < 0] = 0.
l_fire_front[l_fire_front > l] = l
l_fire_end = s * (time - t_lim)
l_fire_end[l_fire_end < 0] = 0.
l_fire_end[l_fire_end > l] = l
l_fire_median = (l_fire_front + l_fire_end) / 2.
r = np.absolute(l_s - l_fire_median)
from itertools import cycle
import numpy as np
from numpy import zeros, searchsorted, allclose
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.op2.result_objects.op2_objects import BaseElement
from pyNastran.op2.tables.oef_forces.oef_force_objects import ForceObject
from pyNastran.f06.f06_formatting import write_imag_floats_13e, write_float_12e # get_key0,
from pyNastran.f06.f06_formatting import _eigenvalue_header
class ComplexForceObject(ForceObject):
def __init__(self, data_code, isubcase, apply_data_code=True):
ForceObject.__init__(self, data_code, isubcase, apply_data_code=apply_data_code)
@property
def is_real(self):
return False
@property
def is_complex(self):
return True
class ComplexRodForceArray(ComplexForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
ComplexForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
if is_sort1:
pass
else:
raise NotImplementedError('SORT2')
def get_headers(self):
headers = ['axial_force', 'torque']
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the ComplexRodForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[axial_force, torque]
self.data = zeros((self.ntimes, self.ntotal, 2), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(column_values, column_names,
headers, self.element, self.data)
#data_frame = pd.Panel(self.data, items=column_values,
#major_axis=self.element, minor_axis=headers).to_frame()
#data_frame.columns.names = column_names
#data_frame.index.names = ['ElementID', 'Item']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(axial1, torque1) = t1
(axial2, torque2) = t2
if not allclose(t1, t2):
msg += '(%s) (%s, %s) (%s, %s)\n' % (
eid,
axial1, torque1,
axial2, torque2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, axial, torque):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [axial, torque]
self.ielement += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
if self.element_type == 1: # CROD
msg = [' C O M P L E X F O R C E S I N R O D E L E M E N T S ( C R O D )\n']
elif self.element_type == 10: # CONROD
msg = [' C O M P L E X F O R C E S I N R O D E L E M E N T S ( C O N R O D )\n']
elif self.element_type == 3: # CTUBE
msg = [' C O M P L E X F O R C E S I N R O D E L E M E N T S ( C T U B E )\n']
#pass
else:
raise NotImplementedError('element_name=%s element_type=%s' % (self.element_name, self.element_type))
if is_mag_phase:
msg += [' (MAGNITUDE/PHASE)\n']
else:
msg += [' (REAL/IMAGINARY)\n']
if is_sort1:
msg += [
' \n'
' ELEMENT AXIAL TORSIONAL\n'
' ID. STRAIN STRAIN\n'
]
#' 14 0.0 / 0.0 0.0 / 0.0'
else:
raise NotImplementedError('sort2')
return self.element_name, msg
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = searchsorted(eids, self.element) #[0]
return itot
def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.element == eid) for eid in eids])
ind = searchsorted(eids, self.element)
#ind = ind.reshape(ind.size)
#ind.sort()
return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
(elem_name, msg_temp) = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
#is_odd = False
#nwrite = len(eids)
#if len(eids) % 2 == 1:
#nwrite -= 1
#is_odd = True
#print('len(eids)=%s nwrite=%s is_odd=%s' % (len(eids), nwrite, is_odd))
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
axial = self.data[itime, :, 0]
torsion = self.data[itime, :, 1]
for eid, axiali, torsioni in zip(eids, axial, torsion):
out = write_imag_floats_13e([axiali, torsioni], is_mag_phase)
[raxial, rtorsion, iaxial, itorsion] = out
#ELEMENT AXIAL TORSIONAL
#ID. STRESS STRESS
#14 0.0 / 0.0 0.0 / 0.0
f06_file.write(' %8i %-13s / %-13s %-13s / %s\n' % (eid, raxial, iaxial, rtorsion, itorsion))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result, date,
is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
if self.is_sort1:
struct1 = Struct(endian + b'i4f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
axial = self.data[itime, :, 0]
torsion = self.data[itime, :, 1]
for eid_device, axiali, torsioni in zip(eids_device, axial, torsion):
data = [eid_device, axiali.real, torsioni.real, axiali.imag, torsioni.imag]
op2_ascii.write(' eid_device=%s data=%s\n' % (eid_device, tuple(data)))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexCShearForceArray(BaseElement):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
BaseElement.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if is_sort1:
#pass
#else:
#raise NotImplementedError('SORT2')
@property
def is_real(self):
return False
@property
def is_complex(self):
return True
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self):
headers = [
'force41', 'force14', 'force21', 'force12', 'force32', 'force23',
'force43', 'force34', 'kickForce1', 'kickForce2', 'kickForce3',
'kickForce4', 'shear12', 'shear23', 'shear34', 'shear41'
]
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the ComplexCShearForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[force41, force14, force21, force12, force32, force23, force43, force34,
#kick_force1, kick_force2, kick_force3, kick_force4,
#shear12, shear23, shear34, shear41]
self.data = zeros((self.ntimes, self.ntotal, 16), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
#Mode 1 2
#EigenvalueReal -0.0 -0.0
#EigenvalueImag -0.0 -0.0
#Damping 0.0 0.0
#ElementID Item
#22 force41 2.927977e-10+0.000000e+00j 0.000000+0.000000j
# force14 2.927977e-10+5.855954e-10j 0.000000+0.000000j
# force21 -2.927977e-10+0.000000e+00j 0.000000+0.000000j
# force12 -2.927977e-10+5.855954e-10j 0.000000+0.000000j
# force32 2.927977e-10+0.000000e+00j 0.000000+0.000000j
# force23 2.927977e-10+5.855954e-10j 0.000000+0.000000j
# force43 -2.927977e-10+0.000000e+00j 0.000000+0.000000j
# force34 -2.927977e-10+5.855954e-10j 0.000000+0.000000j
# kickForce1 0.000000e+00+0.000000e+00j 0.000000+0.000000j
# kickForce2 0.000000e+00+0.000000e+00j 0.000000+0.000000j
# kickForce3 0.000000e+00+0.000000e+00j 0.000000+0.000000j
# kickForce4 0.000000e+00+0.000000e+00j 0.000000+0.000000j
# shear12 0.000000e+00+0.000000e+00j 0.000000+0.000000j
# shear23 0.000000e+00+0.000000e+00j 0.000000+0.000000j
# shear34 0.000000e+00+0.000000e+00j 0.000000+0.000000j
# shear41 0.000000e+00+0.000000e+00j 0.000000+0.000000j
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(force41a, force14a, force21a, force12a, force32a, force23a, force43a, force34a,
kick_force1a, kick_force2a, kick_force3a, kick_force4a,
shear12a, shear23a, shear34a, shear41a) = t1
(force41b, force14b, force21b, force12b, force32b, force23b, force43b, force34b,
kick_force1b, kick_force2b, kick_force3b, kick_force4b,
shear12b, shear23b, shear34b, shear41b) = t2
if not allclose(t1, t2):
msg += (
'%s (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n'
' (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid,
force41a, force14a, force21a, force12a, force32a, force23a,
force43a, force34a, kick_force1a, kick_force2a, kick_force3a,
kick_force4a, shear12a, shear23a, shear34a, shear41a,
force41b, force14b, force21b, force12b, force32b, force23b,
force43b, force34b, kick_force1b, kick_force2b, kick_force3b,
kick_force4b, shear12b, shear23b, shear34b, shear41b
))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid,
force41, force14, force21, force12, force32, force23, force43, force34,
kick_force1, kick_force2, kick_force3, kick_force4,
shear12, shear23, shear34, shear41):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [
force41, force14, force21, force12, force32, force23, force43, force34,
kick_force1, kick_force2, kick_force3, kick_force4,
shear12, shear23, shear34, shear41]
self.ielement += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
msg = [' C O M P L E X F O R C E S A C T I N G O N S H E A R P A N E L E L E M E N T S (CSHEAR)\n']
if is_mag_phase:
msg += [' (MAGNITUDE/PHASE)\n \n']
else:
msg += [' (REAL/IMAGINARY)\n \n']
if is_sort1:
msg += [
' ====== POINT 1 ====== ====== POINT 2 ====== ====== POINT 3 ====== ====== POINT 4 ======\n'
' ELEMENT F-FROM-4 F-FROM-2 F-FROM-1 F-FROM-3 F-FROM-2 F-FROM-4 F-FROM-3 F-FROM-1\n'
' ID KICK-1 SHEAR-12 KICK-2 SHEAR-23 KICK-3 SHEAR-34 KICK-4 SHEAR-41\n'
]
else:
raise NotImplementedError('sort2')
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
## TODO: I'm sure this ordering is wrong...
force41 = self.data[itime, :, 0]
force14 = self.data[itime, :, 1]
force21 = self.data[itime, :, 2] # TODO: this is wrong...
force12 = self.data[itime, :, 3]
force32 = self.data[itime, :, 4]
force23 = self.data[itime, :, 5]
force43 = self.data[itime, :, 6]
force34 = self.data[itime, :, 7]
kick_force1 = self.data[itime, :, 8]
kick_force2 = self.data[itime, :, 9]
kick_force3 = self.data[itime, :, 10]
kick_force4 = self.data[itime, :, 11]
shear12 = self.data[itime, :, 12]
shear23 = self.data[itime, :, 13]
shear34 = self.data[itime, :, 14]
shear41 = self.data[itime, :, 15]
assert len(force12) > 0, force12
for (eid, iforce41, force14i, iforce21, iforce12, iforce32, iforce23, iforce43, iforce34,
ikick_force1, ikick_force2, ikick_force3, ikick_force4,
ishear12, ishear23, ishear34, ishear41) in zip(
eids, force41, force14, force21, force12, force32, force23, force43, force34,
kick_force1, kick_force2, kick_force3, kick_force4,
shear12, shear23, shear34, shear41):
vals2 = write_imag_floats_13e([
iforce41, force14i, iforce21, iforce12, iforce32, iforce23, iforce43, iforce34,
ikick_force1, ikick_force2, ikick_force3, ikick_force4,
ishear12, ishear23, ishear34, ishear41], is_mag_phase)
[
force41r, force14r, force21r, force12r, force32r, force23r, force43r, force34r,
kick_force1r, kick_force2r, kick_force3r, kick_force4r,
shear12r, shear23r, shear34r, shear41r,
force41i, force14i, force21i, force12i, force32i, force23i, force43i, force34i,
kick_force1i, kick_force2i, kick_force3i, kick_force4i,
shear12i, shear23i, shear34i, shear41i
] = vals2
#complex_cshear_force_f06
#' ====== POINT 1 ====== ====== POINT 2 ====== ====== POINT 3 ====== ====== POINT 4 ======'
#' ELEMENT F-FROM-4 F-FROM-2 F-FROM-1 F-FROM-3 F-FROM-2 F-FROM-4 F-FROM-3 F-FROM-1'
#' ID KICK-1 SHEAR-12 KICK-2 SHEAR-23 KICK-3 SHEAR-34 KICK-4 SHEAR-41'
#' 25 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0'
#' 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0'
f06_file.write(
' %8i %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'% (
eid,
force41r, force14r, force21r, force12r, force32r, force23r, force43r, force34r,
kick_force1r, kick_force2r, kick_force3r, kick_force4r,
shear12r, shear23r, shear34r, shear41r,
force41i, force14i, force21i, force12i, force32i, force23i, force43i, force34i,
kick_force1i, kick_force2i, kick_force3i, kick_force4i,
shear12i, shear23i, shear34i, shear41i
))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
class ComplexSpringDamperForceArray(ComplexForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
ComplexForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if is_sort1:
#pass
#else:
#raise NotImplementedError('SORT2')
def get_headers(self):
headers = ['spring_force']
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the ComplexSpringDamperForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
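# _times holds the load-step axis: an integer nonlinear factor (e.g. a mode
# number) gets an int32 axis, while float frequency/time steps stay float32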
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[spring_force]
self.data = zeros((self.ntimes, self.ntotal, 1), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
#Mode 1 2
#EigenvalueReal -0.0 -0.0
#EigenvalueImag -0.0 -0.0
#Damping 0.0 0.0
#ElementID Item
#30 spring_force 0.000000+0.000000j 0.000000+0.000000j
#31 spring_force 0.000000+0.000000j 0.000000+0.000000j
#32 spring_force 0.000000+0.000000j 0.000000+0.000000j
#33 spring_force 0.000000+0.000000j 0.000000+0.000000j
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, 0]
t2 = table.data[itime, ie, 0]
if not allclose([t1.real, t1.imag], [t2.real, t2.imag], atol=0.0001):
msg += '%s (%s, %s) (%s, %s)\n' % (
eid,
t1.real, t1.imag,
t2.real, t2.imag)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, force):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, 0] = force
self.ielement += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
# 11-CELAS1, 12-CELAS2, 13-CELAS3, 14-CELAS4
if self.element_type == 11:
msg = [' C O M P L E X F O R C E S I N S C A L A R S P R I N G S ( C E L A S 1 )\n']
elif self.element_type == 12:
msg = [' C O M P L E X F O R C E S I N S C A L A R S P R I N G S ( C E L A S 2 )\n']
elif self.element_type == 13:
msg = [' C O M P L E X F O R C E S I N S C A L A R S P R I N G S ( C E L A S 3 )\n']
elif self.element_type == 14:
msg = [' C O M P L E X F O R C E S I N S C A L A R S P R I N G S ( C E L A S 4 )\n']
elif self.element_type == 20: # CDAMP1
msg = [' C O M P L E X F O R C E S I N S C A L A R D A M P E R S ( C D A M P 1 )\n']
elif self.element_type == 21: # CDAMP2
msg = [' C O M P L E X F O R C E S I N S C A L A R D A M P E R S ( C D A M P 2 )\n']
elif self.element_type == 22: # CDAMP3
msg = [' C O M P L E X F O R C E S I N S C A L A R D A M P E R S ( C D A M P 3 )\n']
elif self.element_type == 23: # CDAMP4
msg = [' C O M P L E X F O R C E S I N S C A L A R D A M P E R S ( C D A M P 4 )\n']
else:
raise NotImplementedError('element_name=%s element_type=%s' % (self.element_name, self.element_type))
if is_mag_phase:
msg += [' (MAGNITUDE/PHASE)\n \n']
else:
msg += [' (REAL/IMAGINARY)\n \n']
if is_sort1:
msg += [
' ELEMENT ELEMENT\n'
' ID. FORCE ID. FORCE\n'
]
#' 14 0.0 / 0.0 0.0 / 0.0'
else:
msg += [' FREQUENCY FORCE FREQUENCY FORCE\n']
return msg
#def get_element_index(self, eids):
## elements are always sorted; nodes are not
#itot = searchsorted(eids, self.element) #[0]
#return itot
#def eid_to_element_node_index(self, eids):
##ind = ravel([searchsorted(self.element == eid) for eid in eids])
#ind = searchsorted(eids, self.element)
##ind = ind.reshape(ind.size)
##ind.sort()
#return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
#is_odd = False
#nwrite = len(eids)
#if len(eids) % 2 == 1:
#nwrite -= 1
#is_odd = True
#print('len(eids)=%s nwrite=%s is_odd=%s' % (len(eids), nwrite, is_odd))
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
spring_force = self.data[itime, :, 0]
for eid, spring_forcei in zip(eids, spring_force):
[rspring, ispring] = write_imag_floats_13e([spring_forcei], is_mag_phase)
#ELEMENT AXIAL TORSIONAL
#ID. STRESS STRESS
#14 0.0 / 0.0 0.0 / 0.0
f06_file.write(' %8i %-13s / %-13s\n' % (eid, rspring, ispring))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
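# the OP2 packs the element id and the device code into a single word:
# eid_device = eid * 10 + device_code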
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i2f')
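# each SORT1 entry is one int (eid_device) plus the real and imaginary parts
# of the spring/damper force, matching the pack() call below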
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
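# the repeated 4s are FORTRAN-style record-length markers (in bytes); they
# bracket the one-word sub-records (itable, 1, 0, ntotal), and the final
# 4*ntotal word opens the data block written next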
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
force = self.data[itime, :, 0]
for eid, forcei in zip(eids_device, force):
data = [eid, forcei.real, forcei.imag]
op2_ascii.write(' eid=%s force=%s\n' % (eid, forcei))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexSpringForceArray(ComplexSpringDamperForceArray): # 11-CELAS1,12-CELAS2,13-CELAS3, 14-CELAS4
def __init__(self, data_code, is_sort1, isubcase, dt):
ComplexSpringDamperForceArray.__init__(self, data_code, is_sort1, isubcase, dt)
class ComplexDamperForceArray(ComplexSpringDamperForceArray):
def __init__(self, data_code, is_sort1, isubcase, dt):
ComplexSpringDamperForceArray.__init__(self, data_code, is_sort1, isubcase, dt)
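# Minimal usage sketch, kept as a comment so the module stays import-safe.
# The results-attribute name below is an assumption for illustration only,
# not a statement of the public API:
#
#     from pyNastran.op2.op2 import read_op2
#     model = read_op2('model.op2')       # any OP2 with CELASx/CDAMPx complex forces
#     forces = model.celas2_force[1]      # subcase 1; attribute name assumed
#     print(''.join(forces.get_stats()))
#     with open('forces.f06', 'w') as f06_file:
#         forces.write_f06(f06_file)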
class ComplexViscForceArray(BaseElement):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
BaseElement.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
if is_sort1:
pass
else:
raise NotImplementedError('SORT2')
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self):
headers = ['axial_force', 'torque']
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the ComplexViscForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[axial_force, torque]
self.data = zeros((self.ntimes, self.ntotal, 2), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
#Mode 1 2 3 4
#EigenvalueReal -0.0 -0.0 -0.0 -0.0
#EigenvalueImag -0.0 -0.0 -0.0 -0.0
#Damping 0.0 0.0 0.0 0.0
#ElementID Item
#50 axial_force (-0+0j) (-0+0j) (-0+0j) (-0+0j)
# torque (-0+0j) (-0+0j) (-0+0j) (-0+0j)
#51 axial_force (-0+0j) (-0+0j) (-0+0j) (-0+0j)
# torque 0j (-0+0j) (-0+0j) (-0+0j)
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(column_values, column_names,
headers, self.element, self.data)
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(axial1, torque1) = t1
(axial2, torque2) = t2
if not allclose(t1, t2):
msg += '(%s) (%s, %s) (%s, %s)\n' % (
eid,
axial1, torque1,
axial2, torque2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, axial, torque):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [axial, torque]
self.ielement += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
#if self.element_type == 1: # CROD
#msg = [' C O M P L E X F O R C E S I N R O D E L E M E N T S ( C R O D )\n']
#elif self.element_type == 10: # CONROD
#msg = [' C O M P L E X F O R C E S I N R O D E L E M E N T S ( C O N R O D )\n']
#elif self.element_type == 3: # CTUBE
#msg = [' C O M P L E X F O R C E S I N R O D E L E M E N T S ( C T U B E )\n']
##pass
if self.element_type == 24:
msg = [' C O M P L E X F O R C E S I N V I S C E L E M E N T S ( C V I S C )\n']
else:
raise NotImplementedError('element_name=%s element_type=%s' % (self.element_name, self.element_type))
if is_mag_phase:
msg += [' (MAGNITUDE/PHASE)\n']
else:
msg += [' (REAL/IMAGINARY)\n']
if is_sort1:
msg += [
' \n'
' ELEMENT AXIAL TORSIONAL\n'
' ID. STRAIN STRAIN\n'
]
#' 14 0.0 / 0.0 0.0 / 0.0'
else:
raise NotImplementedError('sort2')
return self.element_name, msg
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = searchsorted(eids, self.element) #[0]
return itot
def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.element == eid) for eid in eids])
ind = searchsorted(eids, self.element)
#ind = ind.reshape(ind.size)
#ind.sort()
return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
(elem_name, msg_temp) = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
#is_odd = False
#nwrite = len(eids)
#if len(eids) % 2 == 1:
#nwrite -= 1
#is_odd = True
#print('len(eids)=%s nwrite=%s is_odd=%s' % (len(eids), nwrite, is_odd))
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
axial = self.data[itime, :, 0]
torsion = self.data[itime, :, 1]
for eid, axiali, torsioni in zip(eids, axial, torsion):
out = write_imag_floats_13e([axiali, torsioni], is_mag_phase)
[raxial, rtorsion, iaxial, itorsion] = out
#ELEMENT AXIAL TORSIONAL
#ID. STRESS STRESS
#14 0.0 / 0.0 0.0 / 0.0
f06_file.write(' %8i %-13s / %-13s %-13s / %s\n' %
(eid, raxial, iaxial, rtorsion, itorsion))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
class ComplexPlateForceArray(ComplexForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
ComplexForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if is_sort1:
#pass
#else:
#raise NotImplementedError('SORT2')
def get_headers(self):
headers = ['mx', 'my', 'mxy', 'bmx', 'bmy', 'bmxy', 'tx', 'ty']
return headers
def build(self):
"""sizes the vectorized attributes of the ComplexPlateForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[mx, my, mxy, bmx, bmy, bmxy, tx, ty]
self.data = zeros((self.ntimes, self.ntotal, 8), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
# Freq 0.00001 10.00000 20.00000 30.00000 40.00000 50.00000 60.00000
# ElementID Item
#8 mx 0j 0j 0j 0j (-361.6303-680.04156j) 0j 0j
# my 0j 0j 0j 0j (-7884.6196-14826.936j) 0j 0j
# mxy 0j 0j 0j 0j (-237.5723-446.7519j) 0j 0j
# bmx 0j 0j 0j 0j (5.514431+10.3698225j) 0j 0j
# bmy 0j 0j 0j 0j (10.107019+19.00613j) 0j 0j
# bmxy 0j 0j 0j 0j (-16.361727-30.768036j) 0j 0j
# tx 0j 0j 0j 0j (18.819313+35.3895j) 0j 0j
# ty 0j 0j 0j 0j (-61.55238-115.74853j) 0j 0j
#9 mx 0j 0j 0j 0j (1086.9078+2043.9175j) 0j 0j
# my 0j 0j 0j 0j (8089.895+15212.953j) 0j 0j
# mxy 0j 0j 0j 0j (-4725.3286-8885.925j) 0j 0j
# bmx 0j 0j 0j 0j (-3.9810739-7.486363j) 0j 0j
# bmy 0j 0j 0j 0j (-10.283798-19.338562j) 0j 0j
# bmxy 0j 0j 0j 0j (-8.663734-16.292051j) 0j 0j
# tx 0j 0j 0j 0j (54.14508+101.81919j) 0j 0j
# ty 0j 0j 0j 0j (-61.92162-116.44288j) 0j 0j
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(column_values, column_names,
headers, self.element, self.data)
#data_frame = pd.Panel(self.data, items=column_values,
#major_axis=self.element, minor_axis=headers).to_frame()
#data_frame.columns.names = column_names
#data_frame.index.names = ['ElementID', 'Item']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(mx1, my1, mxy1, bmx1, bmy1, bmxy1, tx1, ty1) = t1
(mx2, my2, mxy2, bmx2, bmy2, bmxy2, tx2, ty2) = t2
if not allclose(t1, t2):
#if not np.array_equal(t1.real, t2.real):
msg += ('%-8s (%s, %s, %s, %s, %s, %s, %s, %s)\n'
'%-8s (%s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid,
#mx1.real, my1.real, mxy1.real, bmx1.real, bmy1.real,
#bmxy1.real, tx1.real, ty1.real,
mx1, my1, mxy1, bmx1, bmy1, bmxy1, tx1, ty1,
'',
mx2, my2, mxy2, bmx2, bmy2, bmxy2, tx2, ty2,
#mx2.real, my2.real, mxy2.real, bmx2.real, bmy2.real,
#bmxy2.real, tx2.real, ty2.real,
))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [mx, my, mxy, bmx, bmy, bmxy, tx, ty]
self.ielement += 1
self.itotal += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
loads = [' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -\n'
' ID FX FY FXY MX MY MXY QX QY\n',]
if is_mag_phase:
mag_real = [' (MAGNITUDE/PHASE)\n \n']
else:
mag_real = [' (REAL/IMAGINARY)\n \n']
cquad4_bilinear = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'] # good
cquad4_linear = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'] # good
ctria3 = [' C O M P L E X F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A 3 )\n'] # good
cquad8 = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 8 )\n']
cquadr = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D R )\n']
ctria6 = [' C O M P L E X F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A 6 )\n']
ctriar = [' C O M P L E X F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A R )\n']
#is_bilinear = False
if self.element_type == 144: # CQUAD4
msg = cquad4_linear + mag_real + loads
elif self.element_type == 33: # CQUAD4
msg = cquad4_bilinear + mag_real + loads
elif self.element_type == 64: #CQUAD8
msg = cquad8 + mag_real + loads
elif self.element_type == 82: # CQUADR
msg = cquadr + mag_real + loads
elif self.element_type == 74: # CTRIA3
msg = ctria3 + mag_real + loads
elif self.element_type == 75: # CTRIA6
msg = ctria6 + mag_real + loads
elif self.element_type == 70: # CTRIAR
msg = ctriar + mag_real + loads
else:
raise NotImplementedError('name=%r type=%s' % (self.element_name, self.element_type))
return msg
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = searchsorted(eids, self.element) #[0]
return itot
def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.element == eid) for eid in eids])
ind = searchsorted(eids, self.element)
#ind = ind.reshape(ind.size)
#ind.sort()
return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
mx = self.data[itime, :, 0]
my = self.data[itime, :, 1]
mxy = self.data[itime, :, 2]
bmx = self.data[itime, :, 3]
bmy = self.data[itime, :, 4]
bmxy = self.data[itime, :, 5]
tx = self.data[itime, :, 6]
ty = self.data[itime, :, 7]
for eid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi in zip(eids, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
out = write_imag_floats_13e([mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi], is_mag_phase)
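# write_imag_floats_13e returns the formatted real (or magnitude) strings for
# all eight components first, followed by the imaginary (or phase) strings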
[smxr, smyr, smxyr, sbmxr, sbmyr, sbmxyr, stxr, styr,
smxi, smyi, smxyi, sbmxi, sbmyi, sbmxyi, stxi, styi] = out
#"""
#ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -
#ID FX FY FXY MX MY MXY QX QY
#0 564 1.543439E+03 7.311177E+02 1.322702E+02 1.080178E+00 1.699104E+00 2.618547E-01 3.877034E+01 4.518554E+00
#358.3129 358.0245 177.5593 177.5292 178.2112 0.0907 358.1465 179.4567
#"""
# fx fy fxy mx my mxy qx qy
f06_file.write(
'0 %8i %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %8s %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, smxr, smyr, smxyr, sbmxr, sbmyr, sbmxyr, stxr, styr,
'', smxi, smyi, smxyi, sbmxi, sbmyi, sbmxyi, stxi, styi))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
eids = self.element
eids_device = eids * 10 + self.device_code
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i 16f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
mx = self.data[itime, :, 0]
my = self.data[itime, :, 1]
mxy = self.data[itime, :, 2]
bmx = self.data[itime, :, 3]
bmy = self.data[itime, :, 4]
bmxy = self.data[itime, :, 5]
tx = self.data[itime, :, 6]
ty = self.data[itime, :, 7]
for eid_device, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi in zip(eids_device, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
data = [eid_device,
mxi.real, myi.real, mxyi.real, bmxi.real, bmyi.real, bmxyi.real, txi.real, tyi.real,
mxi.imag, myi.imag, mxyi.imag, bmxi.imag, bmyi.imag, bmxyi.imag, txi.imag, tyi.imag]
op2_ascii.write(' eid_device=%s data=%s\n' % (eid_device, str(data)))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexPlate2ForceArray(ComplexForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
ComplexForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if is_sort1:
#pass
#else:
#raise NotImplementedError('SORT2')
def get_headers(self):
headers = ['mx', 'my', 'mxy', 'bmx', 'bmy', 'bmxy', 'tx', 'ty']
return headers
def build(self):
"""sizes the vectorized attributes of the ComplexPlate2ForceArray"""
if self.is_built:
return
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
self.element_node = zeros((self.ntotal, 2), dtype='int32')
#[mx, my, mxy, bmx, bmy, bmxy, tx, ty]
self.data = zeros((self.ntimes, self.ntotal, 8), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
assert 0 not in self.element
#print(self.element_node)
element_node = [self.element_node[:, 0], self.element_node[:, 1]]
assert 0 not in self.element_node[:, 0]
if self.nonlinear_factor not in (None, np.nan):
# Freq 0.00001 10.00000 20.00000 30.00000 40.00000 50.00000 60.00000
# ElementID NodeID Item
# 6 0 mx 0j 0j 0j 0j (-705.7376-1327.1312j) 0j 0j
# my 0j 0j 0j 0j (7404.8853+13924.8j) 0j 0j
# mxy 0j 0j 0j 0j (-101.319756-190.53061j) 0j 0j
# bmx 0j 0j 0j 0j (3.0701134+5.7733126j) 0j 0j
# bmy 0j 0j 0j 0j (98.75731+185.71196j) 0j 0j
# bmxy 0j 0j 0j 0j (0.25202343+0.4739271j) 0j 0j
# tx 0j 0j 0j 0j (14.426779+27.129389j) 0j 0j
# ty 0j 0j 0j 0j (-199.6823-375.5002j) 0j 0j
# 4 mx 0j 0j 0j 0j (-2934.639-5518.5537j) 0j 0j
# my 0j 0j 0j 0j (7516.2485+14134.217j) 0j 0j
# mxy 0j 0j 0j 0j (-101.319756-190.53061j) 0j 0j
# bmx 0j 0j 0j 0j (-19.69526-37.036705j) 0j 0j
# bmy 0j 0j 0j 0j (100.64615+189.2639j) 0j 0j
# bmxy 0j 0j 0j 0j (0.25202343+0.4739271j) 0j 0j
# tx 0j 0j 0j 0j (14.426779+27.129389j) 0j 0j
# ty 0j 0j 0j 0j (-199.6823-375.5002j) 0j 0j
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_element_node(
column_values, column_names,
headers, self.element_node, self.data)
#data_frame = pd.Panel(self.data, items=column_values,
#major_axis=element_node, minor_axis=headers).to_frame()
#data_frame.columns.names = column_names
#data_frame.index.names = ['ElementID', 'NodeID', 'Item']
else:
data_frame = pd.Panel(self.data,
major_axis=element_node, minor_axis=headers).to_frame()
data_frame.columns.names = ['Static']
data_frame.index.names = ['ElementID', 'NodeID', 'Item']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, e in enumerate(self.element_node):
(eid, nid) = e
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(mx1, my1, mxy1, bmx1, bmy1, bmxy1, tx1, ty1) = t1
(mx2, my2, mxy2, bmx2, bmy2, bmxy2, tx2, ty2) = t2
if not allclose(t1, t2):
base1 = '(%s, %s) ' % (eid, nid)
base2 = ' ' * len(base1)
msg += (
'%s (%s, %s, %s, %s, %s, %s, %s, %s)\n'
'%s(%s, %s, %s, %s, %s, %s, %s, %s)\n' % (
base1,
mx1, my1, mxy1, bmx1, bmy1, bmxy1, tx1, ty1,
base2,
mx2, my2, mxy2, bmx2, bmy2, bmxy2, tx2, ty2))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
if i > 0:
raise ValueError(msg)
return True
def add_new_element_sort1(self, dt, eid, term, nid, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.element_node[self.itotal, :] = [eid, nid]
self.data[self.itime, self.itotal, :] = [mx, my, mxy, bmx, bmy, bmxy, tx, ty]
self.itotal += 1
self.ielement += 1
def add_sort1(self, dt, eid, nid, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
#assert self.element[self.ielement - 1] == eid, eid
self.element_node[self.itotal, :] = [eid, nid]
self.data[self.itime, self.itotal, :] = [mx, my, mxy, bmx, bmy, bmxy, tx, ty]
self.itotal += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
loads = [
' ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -\n'
' ID FX FY FXY MX MY MXY QX QY\n',]
if is_mag_phase:
mag_real = [' (MAGNITUDE/PHASE)\n \n']
else:
mag_real = [' (REAL/IMAGINARY)\n \n']
cquad4_bilinear = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'] # good
cquad4_linear = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'] # good
ctria3 = [' C O M P L E X F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A 3 )\n'] # good
cquad8 = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 8 )\n']
cquadr = [' C O M P L E X F O R C E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D R )\n']
ctria6 = [' C O M P L E X F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A 6 )\n']
ctriar = [' C O M P L E X F O R C E S I N T R I A N G U L A R E L E M E N T S ( T R I A R )\n']
#is_bilinear = False
if self.element_type == 144: # CQUAD4
msg = cquad4_linear + mag_real + loads
elif self.element_type == 33: # CQUAD4
msg = cquad4_bilinear + mag_real + loads
elif self.element_type == 64: #CQUAD8
msg = cquad8 + mag_real + loads
elif self.element_type == 82: # CQUADR
msg = cquadr + mag_real + loads
elif self.element_type == 74: # CTRIA3
msg = ctria3 + mag_real + loads
elif self.element_type == 75: # CTRIA6
msg = ctria6 + mag_real + loads
elif self.element_type == 70: # CTRIAR
msg = ctriar + mag_real + loads
else:
raise NotImplementedError('name=%r type=%s' % (self.element_name, self.element_type))
return msg
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = searchsorted(eids, self.element) #[0]
return itot
def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.element == eid) for eid in eids])
ind = searchsorted(eids, self.element)
#ind = ind.reshape(ind.size)
#ind.sort()
return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
mx = self.data[itime, :, 0]
my = self.data[itime, :, 1]
mxy = self.data[itime, :, 2]
bmx = self.data[itime, :, 3]
bmy = self.data[itime, :, 4]
bmxy = self.data[itime, :, 5]
tx = self.data[itime, :, 6]
ty = self.data[itime, :, 7]
for eid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi in zip(eids, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
out = write_imag_floats_13e([mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi], is_mag_phase)
[smxr, smyr, smxyr, sbmxr, sbmyr, sbmxyr, stxr, styr,
smxi, smyi, smxyi, sbmxi, sbmyi, sbmxyi, stxi, styi] = out
#"""
#ELEMENT - MEMBRANE FORCES - - BENDING MOMENTS - - TRANSVERSE SHEAR FORCES -
#ID FX FY FXY MX MY MXY QX QY
#0 564 1.543439E+03 7.311177E+02 1.322702E+02 1.080178E+00 1.699104E+00 2.618547E-01 3.877034E+01 4.518554E+00
#358.3129 358.0245 177.5593 177.5292 178.2112 0.0907 358.1465 179.4567
#"""
# fx fy fxy mx my mxy qx qy
f06_file.write(
'0 %8i %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %8s %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, smxr, smyr, smxyr, sbmxr, sbmyr, sbmxyr, stxr, styr,
'', smxi, smyi, smxyi, sbmxi, sbmyi, sbmxyi, stxi, styi))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
#eids = self.element
eids_device = self.element * 10 + self.device_code
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = len(self.element)
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
nnodes_all = 5
numwide_imag = 2 + nnodes_all * 17
assert ntotali == numwide_imag
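# per element: 2 words (eid_device + the 'CEN/' tag) plus 17 words per node
# (nid + 8 real + 8 imaginary force components) for nnodes_all = 5 rows
# (the centroid plus four corner nodes)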
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i 4s i 16f')
struct2 = Struct(endian + b'i 16f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
mx = self.data[itime, :, 0]
my = self.data[itime, :, 1]
mxy = self.data[itime, :, 2]
bmx = self.data[itime, :, 3]
bmy = self.data[itime, :, 4]
bmxy = self.data[itime, :, 5]
tx = self.data[itime, :, 6]
ty = self.data[itime, :, 7]
nwide = 0
ielement = -1
for eid, nid, mxi, myi, mxyi, bmxi, bmyi, bmxyi, txi, tyi in zip(eids, nids, mx, my, mxy, bmx, bmy, bmxy, tx, ty):
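# nid == 0 flags the element's centroid row: it starts a new element record
# (written with the 'CEN/' tag below); nonzero nids are the corner-node rows
# appended to that record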
if nid == 0:
ielement += 1
eid_device = eids_device[ielement]
data = [eid_device, b'CEN/', nid,
mxi.real, myi.real, mxyi.real, bmxi.real, bmyi.real, bmxyi.real, txi.real, tyi.real,
mxi.imag, myi.imag, mxyi.imag, bmxi.imag, bmyi.imag, bmxyi.imag, txi.imag, tyi.imag]
op2_ascii.write(' eid_device=%s data=%s\n' % (eid_device, str(data)))
op2.write(struct1.pack(*data))
else:
data = [nid,
mxi.real, myi.real, mxyi.real, bmxi.real, bmyi.real, bmxyi.real, txi.real, tyi.real,
mxi.imag, myi.imag, mxyi.imag, bmxi.imag, bmyi.imag, bmxyi.imag, txi.imag, tyi.imag]
op2_ascii.write(' data=%s\n' % (str(data)))
op2.write(struct2.pack(*data))
nwide += len(data)
assert nwide == ntotal, 'nwide=%s ntotal=%s' % (nwide, ntotal)
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexCBarForceArray(ComplexForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
ComplexForceObject.__init__(self, data_code, isubcase)
self.result_flag = 0
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.itime = 0
self.nelements = 0 # result specific
#self.element_type = 'CBAR'
#self.cid = {} # gridGauss
#if is_sort1:
##sort1
#pass
#else:
#raise NotImplementedError('SORT2')
def get_headers(self):
headers = ['bending_moment_1a', 'bending_moment_2a',
'bending_moment_1b', 'bending_moment_2b',
'shear1', 'shear2', 'axial', 'torque', ]
return headers
def build(self):
"""sizes the vectorized attributes of the ComplexCBarForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (
#self.ntimes, self.nelements, self.ntotal, self.subtitle))
if self.is_built:
return
nnodes = 1
#self.names = []
#self.nelements //= nnodes
self.nelements //= self.ntimes
#self.ntotal //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
self.is_built = True
#print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
self._times = zeros(self.ntimes, 'float32')
self.element = zeros(self.ntotal, 'int32')
# the number is messed up because of the offset for the element's properties
if not self.nelements * nnodes == self.ntotal:
msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (
self.ntimes, self.nelements, nnodes, self.nelements * nnodes, self.ntotal)
raise RuntimeError(msg)
#[bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq]
self.data = zeros((self.ntimes, self.ntotal, 8), 'complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(column_values, column_names,
headers, self.element, self.data)
#self.data_frame = pd.Panel(self.data, items=column_values,
#major_axis=self.element, minor_axis=headers).to_frame()
#self.data_frame.columns.names = column_names
#self.data_frame.index.names = ['ElementID', 'Item']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for ieid, eid in enumerate(self.element):
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
(bm1a1, bm2a1, bm1b1, bm2b1, ts11, ts21, af1, trq1) = t1
(bm1a2, bm2a2, bm1b2, bm2b2, ts12, ts22, af2, trq2) = t2
#d = t1 - t2
if not allclose([bm1a1.real, bm2a1.real, bm1b1.real, bm2b1.real, ts11.real, ts21.real, af1.real, trq1.real],
[bm1a2.real, bm2a2.real, bm1b2.real, bm2b2.real, ts12.real, ts22.real, af2.real, trq2.real], atol=0.0001):
#if not np.array_equal(t1, t2):
msg += '%-4s (%s, %s, %s, %s, %s, %s, %s, %s)\n     (%s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid,
bm1a1.real, bm2a1.real, bm1b1.real, bm2b1.real, ts11.real, ts21.real, af1.real, trq1.real,
bm1a2.real, bm2a2.real, bm1b2.real, bm2b2.real, ts12.real, ts22.real, af2.real, trq2.real,
)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.data[self.itime, self.itotal, :] = [bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq]
self.element[self.itotal] = eid
self.itotal += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
else:
msg.append(' type=%s nelements=%i; table_name=%r\n' % (
self.__class__.__name__, nelements, self.table_name))
msg.append(' eType, cid\n')
msg.append(' data: [ntimes, nelements, 8] where 8=[%s]\n' % str(', '.join(self.get_headers())))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' is_sort1=%s is_sort2=%s\n' % (self.is_sort1, self.is_sort2))
msg.append(' CBAR\n')
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
#msg_temp, nnodes = get_f06_header(self, is_mag_phase, is_sort1)
#is_sort1 = False
if is_mag_phase:
mag_phase = ' (MAGNITUDE/PHASE)\n \n'
else:
mag_phase = ' (REAL/IMAGINARY)\n \n'
name = self.data_code['name']
if name == 'freq':
name = 'FREQUENCY'
#else: # mode
#raise RuntimeError(name)
if is_sort1:
line1 = '0 ELEMENT BEND-MOMENT-END-A BEND-MOMENT-END-B SHEAR\n'
line2 = ' ID. PLANE 1 PLANE 2 PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE\n'
else:
line1 = ' BEND-MOMENT-END-A BEND-MOMENT-END-B SHEAR\n'
line2 = ' %16s PLANE 1 PLANE 2 PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE\n' % name
# force
msg_temp = header + [
' C O M P L E X F O R C E S I N B A R E L E M E N T S ( C B A R )\n',
mag_phase,
' ',
line1,
line2,
]
if self.is_sort1:
assert self.is_sort1 == True, str(self)
if is_sort1:
page_num = self._write_sort1_as_sort1(f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase)
else:
self._write_sort1_as_sort2(f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase)
else:
assert self.is_sort1 == True, str(self)
return page_num - 1
def _write_sort1_as_sort1(self, f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase):
eids = self.element
#times = self._times
ntimes = self.data.shape[0]
for itime in range(ntimes):
dt = self._times[itime]
dt_line = ' %14s = %12.5E\n' % (self.data_code['name'], dt)
header[1] = dt_line
msg = header + msg_temp
f06_file.write(''.join(msg))
#bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq
assert self.is_sort1 == True, str(self)
bm1a = self.data[itime, :, 0]
bm2a = self.data[itime, :, 1]
bm1b = self.data[itime, :, 2]
bm2b = self.data[itime, :, 3]
ts1 = self.data[itime, :, 4]
ts2 = self.data[itime, :, 5]
af = self.data[itime, :, 6]
trq = self.data[itime, :, 7]
for eid, bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi in zip(eids, bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq):
vals = (bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi)
vals2 = write_imag_floats_13e(vals, is_mag_phase)
(bm1air, bm2air, bm1bir, bm2bir, ts1ir, ts2ir, afir, trqir,
bm1aii, bm2aii, bm1bii, bm2bii, ts1ii, ts2ii, afii, trqii) = vals2
f06_file.write('0%16i %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %14s %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, bm1air, bm2air, bm1bir, bm2bir, ts1ir, ts2ir, afir, trqir,
'', bm1aii, bm2aii, bm1bii, bm2bii, ts1ii, ts2ii, afii, trqii))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num
def _write_sort1_as_sort2(self, f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase):
eids = self.element
times = self._times
#ntimes = self.data.shape[0]
for ieid, eid in enumerate(eids):
eid_line = ' ELEMENT-ID = %s' % (eid)
header[1] = eid_line
msg = header + msg_temp
f06_file.write(''.join(msg))
#bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq
bm1a = self.data[:, ieid, 0]
bm2a = self.data[:, ieid, 1]
bm1b = self.data[:, ieid, 2]
bm2b = self.data[:, ieid, 3]
ts1 = self.data[:, ieid, 4]
ts2 = self.data[:, ieid, 5]
af = self.data[:, ieid, 6]
trq = self.data[:, ieid, 7]
for dt, bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi in zip(times, bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq):
vals = (bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi)
vals2 = write_imag_floats_13e(vals, is_mag_phase)
(bm1air, bm2air, bm1bir, bm2bir, ts1ir, ts2ir, afir, trqir,
bm1aii, bm2aii, bm1bii, bm2bii, ts1ii, ts2ii, afii, trqii) = vals2
f06_file.write('0%16s %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %15s %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
write_float_12e(dt),
bm1air, bm2air, bm1bir, bm2bir, ts1ir, ts2ir, afir, trqir,
'', bm1aii, bm2aii, bm1bii, bm2bii, ts1ii, ts2ii, afii, trqii))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
eids = self.element
eids_device = eids * 10 + self.device_code
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i 16f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
#bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq
bm1a = self.data[itime, :, 0]
bm2a = self.data[itime, :, 1]
bm1b = self.data[itime, :, 2]
bm2b = self.data[itime, :, 3]
ts1 = self.data[itime, :, 4]
ts2 = self.data[itime, :, 5]
af = self.data[itime, :, 6]
trq = self.data[itime, :, 7]
assert len(eids_device) == len(bm1a.real)
for eid_device, bm1ai, bm2ai, bm1bi, bm2bi, ts1i, ts2i, afi, trqi in zip(
eids_device, bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq):
data = [eid_device,
bm1ai.real, bm2ai.real, bm1bi.real, bm2bi.real, ts1i.real, ts2i.real, afi.real, trqi.real,
bm1ai.imag, bm2ai.imag, bm1bi.imag, bm2bi.imag, ts1i.imag, ts2i.imag, afi.imag, trqi.imag]
op2_ascii.write(' eid_device=%s data=%s\n' % (eid_device, str(data)))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexCBeamForceArray(ComplexForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
ComplexForceObject.__init__(self, data_code, isubcase)
self.result_flag = 0
self.itime = 0
self.nelements = 0 # result specific
#self.element_type = 'CBEAM'
#if is_sort1:
##sort1
#pass
#else:
#raise NotImplementedError('SORT2')
def get_headers(self):
headers = [
'sd', 'bending_moment1', 'bending_moment2', 'shear1', 'shear2',
'axial_force', 'total_torque', 'warping_torque', ]
return headers
def build(self):
"""sizes the vectorized attributes of the ComplexCBeamForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (
#self.ntimes, self.nelements, self.ntotal, self.subtitle))
nnodes = 11
#self.names = []
#self.nelements //= nnodes
self.nelements //= self.ntimes
#self.ntotal //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
self.is_built = True
#print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
self._times = zeros(self.ntimes, 'float32')
self.element = zeros(self.ntotal, 'int32')
self.element_node = zeros((self.ntotal, 2), 'int32')
# the number is messed up because of the offset for the element's properties
if not self.nelements * nnodes == self.ntotal:
msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (
self.ntimes, self.nelements, nnodes, self.nelements * nnodes, self.ntotal)
raise RuntimeError(msg)
#[sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq]
self.data = zeros((self.ntimes, self.ntotal, 8), 'complex64')
def finalize(self):
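# build() allocates 11 output-station slots per CBEAM element; keep only the
# rows that were actually filled (nonzero station distance or nonzero node id)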
sd = self.data[0, :, 0].real
i_sd_nonzero = np.where(sd != 0.0)[0]
i_node_nonzero = np.where(self.element_node[:, 1] != 0)[0]
assert i_node_nonzero.max() > 0, "CBEAM element_node hasn't been filled"
i = np.union1d(i_sd_nonzero, i_node_nonzero)
self.element = self.element[i]
self.element_node = self.element_node[i, :]
self.data = self.data[:, i, :]
def build_dataframe(self):
"""creates a pandas dataframe"""
# Freq 0.00001 10.00000 ... 50.00000 60.00000
# ElementID Location Item ...
# 12.0 12.0 bending_moment1 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# bending_moment2 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# shear1 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# shear2 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# axial_force 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# total_torque 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# warping_torque 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# 0.0 1.0 bending_moment1 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# bending_moment2 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# shear1 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# shear2 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# axial_force 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# total_torque 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
# warping_torque 0.000000+0.000000j 0.000000+0.000000j ... 0.000000+0.000000j 0.000000+0.000000j
import pandas as pd
headers = self.get_headers()[1:]
column_names, column_values = self._build_dataframe_transient_header()
element_location = [
self.element_node[:, 0],
self.data[0, :, 0].real,
]
is_v25 = pd.__version__ >= '0.25'
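# pandas.Panel was removed in pandas 0.25, so the Panel-based fallback below
# cannot run there; skip building the dataframe in that case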
if is_v25:
print(f'skipping pandas {self.class_name}')
return
# wrong type for ElementID
#data_frame = self._build_pandas_transient_element_node(
#column_values, column_names,
#headers, element_location, self.data[:, :, 1:])
#data_frame.index.names = ['ElementID', 'Location', 'Item']
#data_frame.index['ElementID', :]# .astype('int32')
#print(data_frame)
data_frame = pd.Panel(self.data[:, :, 1:], items=column_values,
major_axis=element_location, minor_axis=headers).to_frame()
data_frame.columns.names = column_names
data_frame.index.names = ['ElementID', 'Location', 'Item']
#print(data_frame)
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
return self.assert_equal(table)
def assert_equal(self, table, rtol=1.e-5, atol=1.e-8):
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.allclose(self.data, table.data, atol=atol):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for ieid, eid in enumerate(self.element):
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
#print(t1)
#'sd', 'bending_moment1', 'bending_moment2', 'shear1', 'shear2',
#'axial_force', 'total_torque', 'warping_torque', ]
(sd1, bm11, bm21, shear11, shear21, axial1, total_torque1, warp_torque1) = t1
(sd2, bm12, bm22, shear12, shear22, axial2, total_torque2, warp_torque2) = t2
d = t1 - t2
if not allclose(t1, t2, atol=atol):
msg += (
'%-4s (%s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj)\n'
' (%s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj)\n'
' dt12=(%s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj, %s, %sj)\n' % (
eid,
bm11.real, bm11.imag,
bm21.real, bm21.imag,
shear11.real, shear11.imag,
shear21.real, shear21.imag,
axial1.real, axial1.imag,
total_torque1.real, total_torque1.imag,
warp_torque1.real, warp_torque1.imag,
bm12.real, bm12.imag,
bm22.real, bm22.imag,
shear12.real, shear12.imag,
shear22.real, shear22.imag,
axial2.real, axial2.imag,
total_torque2.real, total_torque2.imag,
warp_torque2.real, warp_torque2.imag,
d[0].real, d[0].imag,
d[1].real, d[1].imag,
d[2].real, d[2].imag,
d[3].real, d[3].imag,
d[4].real, d[4].imag,
d[5].real, d[5].imag,
d[6].real, d[6].imag,
))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
#def add_new_element_sort1(self, dt, eid, nid, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq):
#return self.add_sort1(dt, eid, nid, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq)
def add_sort1(self, dt, eid, nid, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.data[self.itime, self.itotal, :] = [sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq]
self.element[self.itotal] = eid
self.element_node[self.itotal, :] = [eid, nid]
self.itotal += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
else:
msg.append(' type=%s nelements=%i\n' % (self.__class__.__name__, nelements))
#msg.append(' eType, cid\n')
msg.append(' data: [ntimes, nelements, 8] where 8=[%s]\n' % str(', '.join(self.get_headers())))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' is_sort1=%s is_sort2=%s\n' % (self.is_sort1, self.is_sort2))
msg.append(' CBEAM\n')
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
# option B
#' C O M P L E X F O R C E S I N B E A M E L E M E N T S ( C B E A M ) '
#' (REAL/IMAGINARY)'
#' STAT DIST/ - BENDING MOMENTS - - WEB SHEARS - AXIAL TOTAL WARPING'
#' ELEMENT-ID GRID LENGTH PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE TORQUE'
#'0 20'
#'0 11 0.000 0.0 0.0 0.0 0.0 0.0 0.0 0.0'
#' 0.0 0.0 0.0 0.0 0.0 0.0 0.0'
#'0 12 1.000 0.0 0.0 0.0 0.0 0.0 0.0 0.0'
#' 0.0 0.0 0.0 0.0 0.0 0.0 0.0'
#msg_temp, nnodes = get_f06_header(self, is_mag_phase, is_sort1)
#print('write_f06 not implemented for ComplexCBeamForceArray')
#return page_num
#is_sort1 = False
if is_mag_phase:
mag_phase = ' (MAGNITUDE/PHASE)\n \n'
else:
mag_phase = ' (REAL/IMAGINARY)\n \n'
if is_sort1:
line1 = '0 ELEMENT BEND-MOMENT-END-A BEND-MOMENT-END-B SHEAR\n'
line2 = ' ID. PLANE 1 PLANE 2 PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE\n'
else:
name = self.data_code['name']
if name == 'freq':
name = 'FREQUENCY'
else: # mode
raise RuntimeError(name)
line1 = ' BEND-MOMENT-END-A BEND-MOMENT-END-B SHEAR\n'
line2 = ' %16s PLANE 1 PLANE 2 PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE\n' % name
# force
msg_temp = header + [
            ' C O M P L E X   F O R C E S   I N   B E A M   E L E M E N T S   ( C B E A M )\n',
mag_phase,
' ',
line1,
line2,
]
if self.is_sort1:
assert self.is_sort1 == True, str(self)
#if is_sort1:
page_num = self._write_sort1_as_sort1(f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase)
#else:
#self._write_sort1_as_sort2(f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase)
else:
assert self.is_sort1 == True, str(self)
return page_num - 1
def _write_sort1_as_sort1(self, f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase):
eids = self.element
#times = self._times
ntimes = self.data.shape[0]
for itime in range(ntimes):
dt = self._times[itime]
dt_line = ' %14s = %12.5E\n' % (self.data_code['name'], dt)
header[1] = dt_line
msg = header + msg_temp
f06_file.write(''.join(msg))
#bm1a, bm2a, bm1b, bm2b, ts1, ts2, af, trq
assert self.is_sort1 == True, str(self)
#sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq
sd = self.data[itime, :, 0]
bm1 = self.data[itime, :, 1]
bm2 = self.data[itime, :, 2]
ts1 = self.data[itime, :, 3]
ts2 = self.data[itime, :, 4]
af = self.data[itime, :, 5]
ttrq = self.data[itime, :, 6]
wtrq = self.data[itime, :, 7]
for eid, sdi, bm1i, bm2i, ts1i, ts2i, afi, ttrqi, wtrqi in zip(eids, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq):
vals = (sdi, bm1i, bm2i, ts1i, ts2i, afi, ttrqi, wtrqi)
vals2 = write_imag_floats_13e(vals, is_mag_phase)
(sdir, bm1ir, bm2ir, ts1ir, ts2ir, afir, ttrqir, wtrqir,
sdii, bm1ii, bm2ii, ts1ii, ts2ii, afii, ttrqii, wtrqii) = vals2
f06_file.write('0%16i %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %14s %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, sdir, bm1ir, bm2ir, ts1ir, ts2ir, afir, ttrqir, wtrqir,
'', sdii, bm1ii, bm2ii, ts1ii, ts2ii, afii, ttrqii, wtrqii))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num
def write_op2(self, op2, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
#long_form = False
#if nids.min() == 0:
#long_form = True
eids_device = eids * 10 + self.device_code
ueids = np.unique(eids)
#ieid = np.searchsorted(eids, ueids)
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = len(ueids)
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'2i 15f')
struct2 = Struct(endian + b'i 15f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
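            # each [4, value, 4] triple above is a one-word Fortran-style record (4-byte
            # length markers around a single payload word); the trailing 4*ntotal entry is
            # the opening length marker for the ntotal-word data block packed below, whose
            # matching closing marker is the [4 * ntotal] footer written further down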
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
sd = self.data[itime, :, 0]
bm1 = self.data[itime, :, 1]
bm2 = self.data[itime, :, 2]
ts1 = self.data[itime, :, 3]
ts2 = self.data[itime, :, 4]
af = self.data[itime, :, 5]
ttrq = self.data[itime, :, 6]
wtrq = self.data[itime, :, 7]
icount = 0
nwide = 0
ielement = 0
assert len(eids) == len(sd)
for eid, sdi, bm1i, bm2i, ts1i, ts2i, afi, ttrqi, wtrqi in zip(eids, sd, bm1, bm2, ts1, ts2, af, ttrq, wtrq):
if icount == 0:
eid_device = eids_device[ielement]
nid = nids[ielement]
data = [eid_device, nid, sdi.real,
bm1i.real, bm2i.real, ts1i.real, ts2i.real, afi.real, ttrqi.real, wtrqi.real,
bm1i.imag, bm2i.imag, ts1i.imag, ts2i.imag, afi.imag, ttrqi.imag, wtrqi.imag] # 17
op2.write(struct1.pack(*data))
ielement += 1
icount = 1
elif nid > 0 and icount > 0:
# 11 total nodes, with 1, 11 getting an nid; the other 9 being
# xxb sections
data = [0, 0.,
0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0.]
#print('***adding %s\n' % (10-icount))
for unused_i in range(10 - icount):
op2.write(struct2.pack(*data))
nwide += len(data)
eid_device2 = eids_device[ielement]
#print(eids_device)
assert eid_device == eid_device2, 'eid_device=%s eid_device2=%s' % (eid_device, eid_device2)
nid = nids[ielement]
data = [nid, sdi.real,
bm1i.real, bm2i.real, ts1i.real, ts2i.real, afi.real, ttrqi.real, wtrqi.real,
bm1i.imag, bm2i.imag, ts1i.imag, ts2i.imag, afi.imag, ttrqi.imag, wtrqi.imag] # 16
op2.write(struct2.pack(*data))
ielement += 1
icount = 0
else:
raise RuntimeError('CBEAM OEF op2 writer')
#data = [0, xxb, sxc, sxd, sxe, sxf, smax, smin, smt, smc] # 10
#op2.write(struct2.pack(*data))
#icount += 1
op2_ascii.write(' eid_device=%s data=%s\n' % (eid_device, str(data)))
nwide += len(data)
assert ntotal == nwide, 'ntotal=%s nwide=%s' % (ntotal, nwide)
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexCBendForceArray(BaseElement): # 69-CBEND
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
BaseElement.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if is_sort1:
#pass
#else:
#raise NotImplementedError('SORT2')
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self):
headers = [
'bending_moment_1a', 'bending_moment_2a', 'shear_1a', 'shear_2a', 'axial_a', 'torque_a',
'bending_moment_1b', 'bending_moment_2b', 'shear_1b', 'shear_2b', 'axial_b', 'torque_b',
]
return headers
def build(self):
"""sizes the vectorized attributes of the ComplexCBendForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element_node = zeros((self.nelements, 3), dtype='int32')
#[bending_moment_1a, bending_moment_2a, shear_1a, shear_2a, axial_a, torque_a
# bending_moment_1b, bending_moment_2b, shear_1b, shear_2b, axial_b, torque_b]
self.data = zeros((self.ntimes, self.nelements, 12), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
#Freq 0.0 2.5
#ElementID Item
#6901 bending_moment_1a 1.066567-0.035549j 1.066996-0.035577j
# bending_moment_2a 1.101375-0.036709j 1.102188-0.036763j
# shear_1a 0.516478-0.017214j 0.516842-0.017239j
# shear_2a 0.859292-0.028640j 0.860111-0.028695j
# axial_a 0.834822-0.027825j 0.834982-0.027835j
# torque_a 0.953420-0.031777j 0.953947-0.031813j
# bending_moment_1b -0.284733+0.009490j -0.284828+0.009497j
# bending_moment_2b 0.094127-0.003137j 0.093836-0.003118j
# shear_1b 0.834822-0.027825j 0.834982-0.027835j
# shear_2b 0.859292-0.028640j 0.860111-0.028695j
# axial_b -0.516478+0.017214j -0.516842+0.017239j
# torque_b -0.242082+0.008069j -0.242077+0.008068j
#6902 bending_moment_1a -0.931214+0.031037j -0.931519+0.031058j
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
# element_node is (nelements, 3)
element = self.element_node[:, 0]
self.data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, element, self.data)
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.element_node, table.element_node):
            assert self.element_node.shape == table.element_node.shape, 'element_node shape=%s table.shape=%s' % (self.element_node.shape, table.element_node.shape)
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
msg += 'Eid, Nid_A, Nid_B\n'
for (eid1, nida1, nidb1), (eid2, nida2, nidb2) in zip(self.element_node, table.element_node):
msg += '(%s, %s, %s), (%s, %s, %s)\n' % (eid1, nida1, nidb1, eid2, nida2, nidb2)
print(msg)
raise ValueError(msg)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
eids = self.element_node[:, 0]
for itime in range(self.ntimes):
for ie, eid in enumerate(eids):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(bending_moment_1a1, bending_moment_2a1, shear_1a1, shear_2a1, axial_a1, torque_a1,
bending_moment_1b1, bending_moment_2b1, shear_1b1, shear_2b1, axial_b1, torque_b1) = t1
(bending_moment_1a2, bending_moment_2a2, shear_1a2, shear_2a2, axial_a2, torque_a2,
bending_moment_1b2, bending_moment_2b2, shear_1b2, shear_2b2, axial_b2, torque_b2) = t2
if not allclose(t1, t2):
msg += '(%s) (%s, %s) (%s, %s)\n' % (
eid,
bending_moment_1a1.real,
bending_moment_1b1.real,
bending_moment_1a2.real,
bending_moment_1b2.real, )
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#if not allclose(t1, t2):
#msg += '(%s) (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n' % (
#eid,
#bending_moment_1a1, bending_moment_2a1, shear_1a1, shear_2a1, axial_a1, torque_a1,
#bending_moment_1b1, bending_moment_2b1, shear_1b1, shear_2b1, axial_b1, torque_b1,
#bending_moment_1a2, bending_moment_2a2, shear_1a2, shear_2a2, axial_a2, torque_a2,
#bending_moment_1b2, bending_moment_2b2, shear_1b2, shear_2b2, axial_b2, torque_b2)
#i += 1
#if i > 10:
#print(msg)
#raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid,
nid_a, bending_moment_1a, bending_moment_2a, shear_1a, shear_2a, axial_a, torque_a,
nid_b, bending_moment_1b, bending_moment_2b, shear_1b, shear_2b, axial_b, torque_b):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
#bending_moment_1a, bending_moment_2a, shear_1a, shear_2a, axial_a, torque_a,
#bending_moment_1b, bending_moment_2b, shear_1b, shear_2b, axial_b, torque_b
self._times[self.itime] = dt
self.element_node[self.ielement] = [eid, nid_a, nid_b]
self.data[self.itime, self.ielement, :] = [
bending_moment_1a, bending_moment_2a, shear_1a, shear_2a, axial_a, torque_a,
bending_moment_1b, bending_moment_2b, shear_1b, shear_2b, axial_b, torque_b
]
self.ielement += 1
if self.ielement == self.nelements:
self.ielement = 0
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
msg = [' C O M P L E X F O R C E S I N B E N D E L E M E N T S ( C B E N D )\n']
if is_mag_phase:
msg += [' (MAGNITUDE/PHASE)\n']
else:
msg += [' (REAL/IMAGINARY)\n']
if is_sort1:
msg += [
' - BENDING MOMENTS - - SHEARS - AXIAL'
' ELEMENT-ID GRID END PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE'
]
else:
raise NotImplementedError('sort2')
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
#' C O M P L E X F O R C E S I N B E N D E L E M E N T S ( C B E N D )'
#' (REAL/IMAGINARY)'
#' - BENDING MOMENTS - - SHEARS - AXIAL'
#' ELEMENT-ID GRID END PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE'
#'0 27 21 A 0.0 0.0 0.0 0.0 0.0 0.0'
#' 0.0 0.0 0.0 0.0 0.0 0.0'
#'0 22 B 0.0 0.0 0.0 0.0 0.0 0.0'
#' 0.0 0.0 0.0 0.0 0.0 0.0'
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element_node[:, 0]
nid_a = self.element_node[:, 1]
nid_b = self.element_node[:, 2]
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
bending_moment_1a = self.data[itime, :, 0]
bending_moment_2a = self.data[itime, :, 1]
shear_1a = self.data[itime, :, 2]
shear_2a = self.data[itime, :, 3]
axial_a = self.data[itime, :, 4]
torque_a = self.data[itime, :, 5]
bending_moment_1b = self.data[itime, :, 6]
bending_moment_2b = self.data[itime, :, 7]
shear_1b = self.data[itime, :, 8]
shear_2b = self.data[itime, :, 9]
axial_b = self.data[itime, :, 10]
torque_b = self.data[itime, :, 11]
for (eid,
nid_ai, bending_moment_1ai, bending_moment_2ai, shear_1ai, shear_2ai, axial_ai, torque_ai,
nid_bi, bending_moment_1bi, bending_moment_2bi, shear_1bi, shear_2bi, axial_bi, torque_bi) in zip(eids,
nid_a, bending_moment_1a, bending_moment_2a, shear_1a, shear_2a, axial_a, torque_a,
nid_b, bending_moment_1b, bending_moment_2b, shear_1b, shear_2b, axial_b, torque_b):
[bending_moment_1ari, bending_moment_2ari, shear_1ari, shear_2ari, axial_ari, torque_ari,
bending_moment_1bri, bending_moment_2bri, shear_1bri, shear_2bri, axial_bri, torque_bri,
bending_moment_1aii, bending_moment_2aii, shear_1aii, shear_2aii, axial_aii, torque_aii,
bending_moment_1bii, bending_moment_2bii, shear_1bii, shear_2bii, axial_bii, torque_bii,
] = write_imag_floats_13e(
[bending_moment_1ai, bending_moment_2ai, shear_1ai, shear_2ai, axial_ai, torque_ai,
bending_moment_1bi, bending_moment_2bi, shear_1bi, shear_2bi, axial_bi, torque_bi],
is_mag_phase)
f06_file.write(
'0 %8s%8s A %13s %13s %13s %13s %13s %s\n'
' %13s %13s %13s %13s %13s %s\n'
'0 %8s%8s B %13s %13s %13s %13s %13s %s\n'
' %13s %13s %13s %13s %13s %s\n'
% (
eid, nid_ai,
bending_moment_1ari, bending_moment_2ari, shear_1ari, shear_2ari, axial_ari, torque_ari,
bending_moment_1aii, bending_moment_2aii, shear_1aii, shear_2aii, axial_aii, torque_aii,
'', nid_bi,
bending_moment_1bri, bending_moment_2bri, shear_1bri, shear_2bri, axial_bri, torque_bri,
bending_moment_1bii, bending_moment_2bii, shear_1bii, shear_2bii, axial_bii, torque_bii,))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
class ComplexSolidPressureForceArray(ComplexForceObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
self.element_type = None
self.element_name = None
ComplexForceObject.__init__(self, data_code, isubcase)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
if is_sort1:
pass
else:
raise NotImplementedError('SORT2')
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self):
headers = ['ax', 'ay', 'az', 'vx', 'vy', 'vz', 'pressure']
return headers
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the ComplexSolidPressureForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
if self.is_built:
return
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[ax, ay, az, vx, vy, vz, pressure]
self.data = zeros((self.ntimes, self.ntotal, 7), dtype='complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
#Mode 1 2
#EigenvalueReal -0.000000 -0.000000
#EigenvalueImag -0.000000 -0.000000
#Damping 0.000000 0.000000
#ElementID Item
#1000 ax -1.887379e-13+2.791559e-13j -1.901257e-13+2.789015e-13j
# ay 3.330669e-14-7.316397e-14j 1.776357e-14-7.368508e-14j
# az -1.360023e-13-9.545406e-14j -1.432188e-13-8.333307e-14j
# vx 0.000000e+00+0.000000e+00j 0.000000e+00+0.000000e+00j
# vy 0.000000e+00+0.000000e+00j 0.000000e+00+0.000000e+00j
# vz 0.000000e+00+0.000000e+00j 0.000000e+00+0.000000e+00j
# pressure 0.000000e+00+0.000000e+00j 0.000000e+00+0.000000e+00j
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, eid in enumerate(self.element):
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(ax1, ay1, az1, vx1, vy1, vz1, pressure1) = t1
(ax2, ay2, az2, vx2, vy2, vz2, pressure2) = t2
#rpressure1 = pressure1.real
#rpressure2 = pressure2.real
if not allclose([ax1, ay1, az1, vx1, vy1, vz1],
[ax2, ay2, az2, vx2, vy2, vz2]):
                        msg += '%s (%s, %s) (%s, %s)\n' % (
                            eid,
                            ax1.real, ax1.imag,
                            ax2.real, ax2.imag)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, ename, ax, ay, az, vx, vy, vz, pressure):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [ax, ay, az, vx, vy, vz, pressure]
self.ielement += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
msg = [
' ( R O O T M E A N S Q U A R E ) \n'
' C O M P L E X A C C E L E R A T I O N S V E L O C I T I E S A N D P R E S S U R E L E V E L S\n'
#' (REAL/IMAGINARY)'
#' ELE-ID EL-TYPE X-ACCELERATION Y-ACCELERATION Z-ACCELERATION X-VELOCITY Y-VELOCITY Z-VELOCITY PRESSURE (DB)'
#' 2000 PENPR 6.883253E+06 1.066544E+07 -6.883253E+06 7.288279E+05 -3.134843E+04 -7.288279E+05 1.162309E+02'
#' 1.831744E+07 -7.878719E+05 -1.831744E+07 -2.738759E+05 -4.243642E+05 2.738759E+05'
#''
]
#msg = [' C O M P L E X A C O U S T I C P R E S S U R E R E S U L T S']
#' C O M P L E X A C O U S T I C P R E S S U R E R E S U L T S'
#' (MAGNITUDE/PHASE)'
#' '
#' POINT ID. TYPE P P(RMS) DB DB(A)'
#'0 57 S 7.339671E+05 5.189931E+05 1.173135E+02 3.011353E+01'
#' 249.9102 249.9102 249.9102 249.9102'
if is_mag_phase:
msg += [' (MAGNITUDE/PHASE)\n \n']
else:
msg += [' (REAL/IMAGINARY)\n \n']
if is_sort1:
msg += [' ELE-ID EL-TYPE X-ACCELERATION Y-ACCELERATION Z-ACCELERATION X-VELOCITY Y-VELOCITY Z-VELOCITY PRESSURE (DB)\n']
#msg += [
#' POINT ID. TYPE P P(RMS) DB DB(A)\n'
#]
#' 14 0.0 / 0.0 0.0 / 0.0'
else:
raise NotImplementedError('sort2')
return msg
#def get_element_index(self, eids):
## elements are always sorted; nodes are not
#itot = searchsorted(eids, self.element) #[0]
#return itot
#def eid_to_element_node_index(self, eids):
##ind = ravel([searchsorted(self.element == eid) for eid in eids])
#ind = searchsorted(eids, self.element)
##ind = ind.reshape(ind.size)
##ind.sort()
#return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
# write the f06
#(ntimes, ntotal, two) = self.data.shape
ntimes = self.data.shape[0]
eids = self.element
#print('len(eids)=%s nwrite=%s is_odd=%s' % (len(eids), nwrite, is_odd))
etypei = self.element_type
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
ax = self.data[itime, :, 0]
            ay = self.data[itime, :, 1]
            az = self.data[itime, :, 2]
            vx = self.data[itime, :, 3]
            vy = self.data[itime, :, 4]
            vz = self.data[itime, :, 5]
            pressure = self.data[itime, :, 6]
for eid, axi, ayi, azi, vxi, vyi, vzi, pressurei in zip(eids, ax, ay, az, vx, vy, vz, pressure):
out = write_imag_floats_13e([axi, ayi, azi, vxi, vyi, vzi, pressurei], is_mag_phase)
[saxr, sayr, sazr, svxr, svyr, svzr, spressurer,
saxi, sayi, sazi, svxi, svyi, svzi, spressurei] = out
#' 1000 HEXPR 1.582050E-08 5.505425E+06 2.598164E-09 -8.884337E-10 -4.806934E+04 1.046571E-10 9.968034E+01'
#' -1.116439E-08 -6.040572E+05 1.315160E-09 -1.258955E-09 -4.381078E+05 -2.067553E-10'
f06_file.write(' %8i %8s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %8s %8s %-13s %-13s %-13s %-13s %-13s %s\n\n'
% (eid, etypei, saxr, sayr, sazr, svxr, svyr, svzr, spressurer,
'', '', saxi, sayi, sazi, svxi, svyi, svzi))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result, date,
is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
if self.is_sort1:
struct1 = Struct(endian + b'i 8s13f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
etypei = self.element_type
if etypei == 76:
ename = b'HEXPR'
elif etypei == 77:
ename = b'PENPR'
elif etypei == 78:
ename = b'TETPR'
else:
raise NotImplementedError(self)
#etypeb = self.element_type#.encode('ascii')
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
ax = self.data[itime, :, 0]
            ay = self.data[itime, :, 1]
            az = self.data[itime, :, 2]
            vx = self.data[itime, :, 3]
            vy = self.data[itime, :, 4]
            vz = self.data[itime, :, 5]
            pressure = self.data[itime, :, 6]
for eid, eid_device, axi, ayi, azi, vxi, vyi, vzi, pressurei in zip(
eids, eids_device, ax, ay, az, vx, vy, vz, pressure):
out = write_imag_floats_13e([axi, ayi, azi, vxi, vyi, vzi, pressurei], is_mag_phase)
[saxr, sayr, sazr, svxr, svyr, svzr, spressurer,
saxi, sayi, sazi, svxi, svyi, svzi, spressurei] = out
#' 1000 HEXPR 1.582050E-08 5.505425E+06 2.598164E-09 -8.884337E-10 -4.806934E+04 1.046571E-10 9.968034E+01'
#' -1.116439E-08 -6.040572E+05 1.315160E-09 -1.258955E-09 -4.381078E+05 -2.067553E-10'
data = [
eid_device, ename,
axi.real, ayi.real, azi.real, vxi.real, vyi.real, vzi.real, pressurei.real,
axi.imag, ayi.imag, azi.imag, vxi.imag, vyi.imag, vzi.imag,
]
op2_ascii.write(' %8i %8s %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %8s %8s %-13s %-13s %-13s %-13s %-13s %s\n\n'
% (eid, etypei, saxr, sayr, sazr, svxr, svyr, svzr, spressurer,
'', '', saxi, sayi, sazi, svxi, svyi, svzi))
op2.write(struct1.pack(*data))
#for eid, eid_device, fxi, fyi, fzi, mxi, myi, mzi in zip(eids, eids_device, fx, fy, fz, mx, my, mz):
#data = [
#eid_device,
#fxi.real, fyi.real, fzi.real, mxi.real, myi.real, mzi.real,
#fxi.imag, fyi.imag, fzi.imag, mxi.imag, myi.imag, mzi.imag,
#]
#vals = (fxi, fyi, fzi, mxi, myi, mzi)
#vals2 = write_imag_floats_13e(vals, is_mag_phase)
#(fxir, fyir, fzir, mxir, myir, mzir,
#fxii, fyii, fzii, mxii, myii, mzii) = vals2
#op2_ascii.write('0%26i %-13s %-13s %-13s %-13s %-13s %s\n'
#' %26s %-13s %-13s %-13s %-13s %-13s %s\n' % (
#eid, fxir, fyir, fzir, mxir, myir, mzir,
#'', fxii, fyii, fzii, mxii, myii, mzii))
#op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexCBushForceArray(ComplexForceObject):
def get_headers(self):
headers = ['fx', 'fy', 'fz', 'mx', 'my', 'mz']
return headers
def __init__(self, data_code, is_sort1, isubcase, dt):
ComplexForceObject.__init__(self, data_code, isubcase)
self.result_flag = 0
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.itime = 0
self.nelements = 0 # result specific
self.element_type = 'CBUSH'
@property
def is_real(self):
return False
@property
def is_complex(self):
return True
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def build(self):
"""sizes the vectorized attributes of the ComplexCBushForceArray"""
#print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (
#self.ntimes, self.nelements, self.ntotal, self.subtitle))
if self.is_built:
return
nnodes = 1
#self.names = []
#self.nelements //= nnodes
        self.nelements //= self.ntimes
#self.ntotal //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
self.is_built = True
#print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
self._times = zeros(self.ntimes, 'float32')
self.element = zeros(self.ntotal, 'int32')
# the number is messed up because of the offset for the element's properties
if self.nelements * nnodes != self.ntotal:
msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (
self.ntimes, self.nelements, nnodes, self.nelements * nnodes, self.ntotal)
raise RuntimeError(msg)
#[fx, fy, fz, mx, my, mz]
self.data = zeros((self.ntimes, self.ntotal, 6), 'complex64')
def build_dataframe(self):
"""creates a pandas dataframe"""
#Freq 10.0
#ElementID Item
#123 fx 10000.000000+0.000021j
# fy 1000.000000+0.000002j
# fz 100.000000+0.000000j
# mx 7000.000000+0.000000j
# my 700.000000+0.000000j
# mz 70.000000+0.000000j
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for ieid, eid in enumerate(self.element):
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
(tx1, ty1, tz1, rx1, ry1, rz1) = t1
(tx2, ty2, tz2, rx2, ry2, rz2) = t2
d = t1 - t2
if not allclose([tx1.real, tx1.imag, ty1.real, ty1.imag],
[tx2.real, tx2.imag, ty2.real, ty2.imag], atol=0.0001):
#if not np.array_equal(t1, t2):
msg += '%-4s (%s, %sj, %s, %sj)\n (%s, %sj, %s, %sj)\n dt12=(%s, %sj, %s, %sj)\n' % (
eid,
tx1.real, tx1.imag, ty1.real, ty1.imag,
tx2.real, tx2.imag, ty2.real, ty2.imag,
d[0].real, d[0].imag, d[1].real, d[1].imag,)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, fx, fy, fz, mx, my, mz):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
#[fx, fy, fz, mx, my, mz]
self._times[self.itime] = dt
self.data[self.itime, self.itotal, :] = [fx, fy, fz, mx, my, mz]
self.element[self.itotal] = eid
self.itotal += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
else:
msg.append(' type=%s nelements=%i; table_name=%r\n' % (
self.__class__.__name__, nelements, self.table_name))
msg.append(' eType, cid\n')
msg.append(' data: [ntimes, nelements, 6] where 6=[%s]\n' % str(', '.join(self.get_headers())))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
# msg.append(' is_sort1=%s is_sort2=%s\n' % (self.is_sort1, self.is_sort2))
msg.append(' CBUSH\n')
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
#msg_temp, nnodes = get_f06_header(self, is_mag_phase, is_sort1)
# write the f06
#is_sort1 = False
if is_mag_phase:
mag_phase = ' (MAGNITUDE/PHASE)\n\n'
else:
mag_phase = ' (REAL/IMAGINARY)\n\n'
name = self.data_code['name']
if name == 'freq':
name = 'FREQUENCY'
else:
raise RuntimeError(name)
# is_sort1 = True
if is_sort1:
line2 = ' ID. FORCE-X FORCE-Y FORCE-Z MOMENT-X MOMENT-Y MOMENT-Z \n'
else:
line2 = ' %26s FORCE-X FORCE-Y FORCE-Z MOMENT-X MOMENT-Y MOMENT-Z \n' % name
# force
msg_temp = header + [
' C O M P L E X F O R C E S I N B U S H E L E M E N T S ( C B U S H ) \n',
mag_phase,
' ',
# line1,
line2,
]
if self.is_sort1:
if is_sort1:
page_num = self._write_sort1_as_sort1(f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase)
else:
page_num = self._write_sort1_as_sort2(f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase)
else:
assert self.is_sort1 == True, str(self)
return page_num - 1
def _write_sort1_as_sort1(self, f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase):
ntimes = self.data.shape[0]
eids = self.element
for itime in range(ntimes):
dt = self._times[itime]
dt_line = ' %14s = %12.5E\n' % (self.data_code['name'], dt)
header[1] = dt_line
msg = header + msg_temp
f06_file.write(''.join(msg))
#fx, fy, fz, mx, my, mz
if self.is_sort1:
fx = self.data[itime, :, 0]
fy = self.data[itime, :, 1]
fz = self.data[itime, :, 2]
mx = self.data[itime, :, 3]
my = self.data[itime, :, 4]
mz = self.data[itime, :, 5]
else:
fx = self.data[:, itime, 0]
fy = self.data[:, itime, 1]
fz = self.data[:, itime, 2]
mx = self.data[:, itime, 3]
my = self.data[:, itime, 4]
mz = self.data[:, itime, 5]
for eid, fxi, fyi, fzi, mxi, myi, mzi in zip(eids, fx, fy, fz, mx, my, mz):
vals = (fxi, fyi, fzi, mxi, myi, mzi)
vals2 = write_imag_floats_13e(vals, is_mag_phase)
(fxir, fyir, fzir, mxir, myir, mzir,
fxii, fyii, fzii, mxii, myii, mzii) = vals2
f06_file.write('0%26i %-13s %-13s %-13s %-13s %-13s %s\n'
' %26s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, fxir, fyir, fzir, mxir, myir, mzir,
'', fxii, fyii, fzii, mxii, myii, mzii))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num
def _write_sort1_as_sort2(self, f06_file, page_num, page_stamp, header, msg_temp, is_mag_phase):
eids = self.element
times = self._times
for ieid, eid in enumerate(eids):
eid_line = ' ELEMENT-ID = %s' % (eid)
header[1] = eid_line
msg = header + msg_temp
f06_file.write(''.join(msg))
if self.is_sort1:
fx = self.data[:, ieid, 0]
fy = self.data[:, ieid, 1]
fz = self.data[:, ieid, 2]
mx = self.data[:, ieid, 3]
my = self.data[:, ieid, 4]
mz = self.data[:, ieid, 5]
else:
raise RuntimeError()
for dt, fxi, fyi, fzi, mxi, myi, mzi in zip(times, fx, fy, fz, mx, my, mz):
vals = (fxi, fyi, fzi, mxi, myi, mzi)
vals2 = write_imag_floats_13e(vals, is_mag_phase)
(fxir, fyir, fzir, mxir, myir, mzir,
fxii, fyii, fzii, mxii, myii, mzii) = vals2
f06_file.write('0%26s %-13s %-13s %-13s %-13s %-13s %s\n'
' %26s %-13s %-13s %-13s %-13s %-13s %s\n' % (
write_float_12e(dt),
fxir, fyir, fzir, mxir, myir, mzir,
'', fxii, fyii, fzii, mxii, myii, mzii))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num
def write_op2(self, op2, op2_ascii, itable, new_result, date,
is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
if self.is_sort1:
struct1 = Struct(endian + b'i 12f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
for itime in range(self.ntimes):
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
fx = self.data[itime, :, 0]
fy = self.data[itime, :, 1]
fz = self.data[itime, :, 2]
mx = self.data[itime, :, 3]
my = self.data[itime, :, 4]
mz = self.data[itime, :, 5]
for eid, eid_device, fxi, fyi, fzi, mxi, myi, mzi in zip(eids, eids_device, fx, fy, fz, mx, my, mz):
data = [
eid_device,
fxi.real, fyi.real, fzi.real, mxi.real, myi.real, mzi.real,
fxi.imag, fyi.imag, fzi.imag, mxi.imag, myi.imag, mzi.imag,
]
vals = (fxi, fyi, fzi, mxi, myi, mzi)
vals2 = write_imag_floats_13e(vals, is_mag_phase)
(fxir, fyir, fzir, mxir, myir, mzir,
fxii, fyii, fzii, mxii, myii, mzii) = vals2
op2_ascii.write('0%26i %-13s %-13s %-13s %-13s %-13s %s\n'
' %26s %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, fxir, fyir, fzir, mxir, myir, mzir,
'', fxii, fyii, fzii, mxii, myii, mzii))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexCBeamForceVUArray(BaseElement): # 191-VUBEAM
"""
**ELTYPE = 191 Beam view element (VUBEAM)**
2 PARENT I Parent p-element identification number
3 COORD I CID coordinate system identification number
    4 ICORD CHAR4 ICORD flat/curved and so on
    TCODE,7 = 0 Real
5 VUGRID I VU grid ID for output grid
6 POSIT RS x/L position of VU grid identification number
7 POS(3) RS Y, Z, W coordinate of output point
10 NX RS Normal x
11 TXY RS Shear xy
12 TZX RS Shear zx
**ELTYPE = 191 Beam view element (VUBEAM)**
TCODE,7 = 1 Real/imaginary or magnitude/phase
5 VUGRID I VU grid identification number for output grid
6 POSIT RS x/L position of VU grid identification number
7 FORCEXR RS Force x real/mag.
8 SHEARYR RS Shear force y real/mag.
9 SHEARZR RS Shear force z real/mag.
10 TORSINR RS Torsional moment x real/mag.
11 BENDYR RS Bending moment y real/mag.
12 BENDZR RS Bending moment z real/mag.
13 FORCEXI RS Force x imag./phase
14 SHEARYI RS Shear force y imag./phase
15 SHEARZI RS Shear force z imag./phase
16 TORSINI RS Torsional moment x imag./phase
17 BENDYI RS Bending moment y imag./phase
18 BENDZI RS Bending moment z imag./phase
Words 5 through max repeat 2 times
"""
def __init__(self, data_code, is_sort1, isubcase, dt):
BaseElement.__init__(self, data_code, isubcase, apply_data_code=True)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.ielement = 0
self.nelements = 0 # result specific
self.nnodes = None
#if is_sort1:
#pass
#else:
#raise NotImplementedError('SORT2')
@property
def is_real(self):
return False
@property
def is_complex(self):
return True
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self):
return ['xxb', 'force_x', 'shear_y', 'shear_z', 'torsion', 'bending_y', 'bending_z']
def build(self):
"""sizes the vectorized attributes of the ComplexCBendForceVUArray"""
#print("self.ielement = %s" % self.ielement)
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
if self.element_type in [191]: # VUBEAM
nnodes_per_element = 2
else:
raise NotImplementedError('name=%r type=%s' % (self.element_name, self.element_type))
#print('nnodes_per_element[%s, %s] = %s' % (self.isubcase, self.element_type, nnodes_per_element))
self.nnodes = nnodes_per_element
#self.nelements //= nnodes_per_element
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("***name=%s type=%s nnodes_per_element=%s ntimes=%s nelements=%s ntotal=%s" % (
#self.element_name, self.element_type, nnodes_per_element, self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = np.zeros(self.ntimes, dtype=dtype)
self.element_node = np.zeros((self.ntotal, 2), dtype='int32')
self.parent_coord = np.zeros((self.ntotal, 2), dtype='int32')
#[xxb, force_x, shear_y, shear_z, torsion, bending_y, bending_z]
self.data = np.zeros((self.ntimes, self.ntotal, 7), dtype='complex64')
#def build_dataframe(self):
#"""creates a pandas dataframe"""
#import pandas as pd
#headers = self.get_headers()
#nelements = self.element_node.shape[0] // 2
#if self.is_fiber_distance:
#fiber_distance = ['Top', 'Bottom'] * nelements
#else:
#fiber_distance = ['Mean', 'Curvature'] * nelements
#fd = np.array(fiber_distance, dtype='unicode')
#element_node = [self.element_node[:, 0], self.element_node[:, 1], fd]
#if self.nonlinear_factor not in (None, np.nan):
#column_names, column_values = self._build_dataframe_transient_header()
#self.data_frame = pd.Panel(self.data, items=column_values, major_axis=element_node, minor_axis=headers).to_frame()
#self.data_frame.columns.names = column_names
#self.data_frame.index.names = ['ElementID', 'NodeID', 'Location', 'Item']
#else:
## option B - nice!
#df1 = pd.DataFrame(element_node).T
#df1.columns = ['ElementID', 'NodeID', 'Location']
#df2 = pd.DataFrame(self.data[0])
#df2.columns = headers
#self.data_frame = df1.join(df2)
#self.data_frame = self.data_frame.reset_index().replace({'NodeID': {0:'CEN'}}).set_index(['ElementID', 'NodeID', 'Location'])
#print(self.data_frame)
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, element_nodei in enumerate(self.element_node):
(eid, nid) = element_nodei
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(xxb1, fx1, fy1, fz1, mx1, my1, mz1) = t1
(xxb2, fx2, fy2, fz2, mx2, my2, mz2) = t2
if not np.array_equal(t1, t2):
eid_nid1 = '(%s, %s) ' % (eid, nid)
eid_nid2 = ' ' * len(eid_nid1)
msg += ('%s(%s, %s, %s, %s, %s, %s, %s)\n%s(%s, %s, %s, %s, %s, %s, %s)\n' % (
eid_nid1,
xxb1, fx1, fy1, fz1, mx1, my1, mz1,
eid_nid2,
xxb2, fx2, fy2, fz2, mx2, my2, mz2))
i += 1
if i > 10:
#print(msg.replace('+0j,', '0,'))
raise ValueError(msg.replace('0j,', '0,').replace('+0j)', ')'))
#print(msg)
if i > 0:
raise ValueError(msg.replace('0j,', '0,').replace('+0j)', ')'))
return True
def _add_sort1(self, dt, eid, parent, coord, icord,
node_id, xxb, force_x, shear_y, shear_z, torsion, bending_y, bending_z):
assert eid is not None, eid
assert isinstance(node_id, int), node_id
self.element_node[self.itotal, :] = [eid, node_id]
self.parent_coord[self.itotal, :] = [parent, coord]
# TODO: save ICORD
#print('parent=%r, coord=%r, icord=%r' % (parent, coord, icord))
self.data[self.itime, self.itotal, :] = [xxb, force_x, shear_y, shear_z, torsion, bending_y, bending_z]
self.itotal += 1
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
nnodes = self.nnodes
ntotal = self.ntotal
nlayers = 2
nelements = self.ntotal // self.nnodes // 2
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msgi = ' type=%s ntimes=%i nelements=%i nnodes_per_element=%i nlayers=%i ntotal=%i\n' % (
self.__class__.__name__, ntimes, nelements, nnodes, nlayers, ntotal)
ntimes_word = 'ntimes'
else:
msgi = ' type=%s nelements=%i nnodes_per_element=%i nlayers=%i ntotal=%i\n' % (
self.__class__.__name__, nelements, nnodes, nlayers, ntotal)
ntimes_word = '1'
msg.append(msgi)
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, ntotal, %i] where %i=[%s]\n' % (ntimes_word, n, n,
str(', '.join(headers))))
msg.append(' element_node.shape = %s\n' % str(self.element_node.shape).replace('L', ''))
msg.append(' data.shape=%s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element type: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = np.searchsorted(eids, self.element_node[:, 0]) #[0]
return itot
def eid_to_element_node_index(self, eids):
        ind = np.hstack([np.where(self.element_node[:, 0] == eid)[0] for eid in eids])
#ind = searchsorted(eids, self.element)
#ind = ind.reshape(ind.size)
#ind.sort()
return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
"""
C O M P L E X F O R C E S I N P - V E R S I O N B E A M E L E M E N T S ( B E A M )
(REAL/IMAGINARY)
VU-ELEMENT ID= 100001001, P-ELEMENT ID = 1, OUTPUT COORD. ID= 0, P OF EDGES = 3
VUGRID VUGRID DIST/ - BENDING MOMENTS - -WEB SHEARS - AXIAL TOTAL
ID. LENGTH PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE
111001001 0.000 0.000000E+00 -1.598690E+05 0.000000E+00 -1.040952E+06 0.000000E+00 0.000000E+00
0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
111001002 0.333 0.000000E+00 5.328967E+04 0.000000E+00 1.872484E+05 0.000000E+00 0.000000E+00
0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
C O M P L E X S T R A I N S I N P - V E R S I O N B E A M E L E M E N T S ( B E A M )
(REAL/IMAGINARY)
VU-ELEMENT ID= 100001003, P-ELEMENT ID = 1, OUTPUT COORD. ID= 0, P OF EDGES = 3
VUGRID VUGRID DIST/ LOCATION LOCATION LOCATION LOCATION
ID. LENGTH C D E F
111001003 0.667 -2.557904E+00 -2.557904E+00 2.557904E+00 2.557904E+00
0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
111001004 1.000 7.673713E+00 7.673713E+00 -7.673713E+00 -7.673713E+00
0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00
"""
msg = [
' C O M P L E X F O R C E S I N P - V E R S I O N B E A M E L E M E N T S ( B E A M )\n'
' (REAL/IMAGINARY)\n'
' VU-ELEMENT ID= %9i, P-ELEMENT ID =%8i, OUTPUT COORD. ID=%8i, P OF EDGES = 3\n'
'\n'
' VUGRID VUGRID DIST/ - BENDING MOMENTS - -WEB SHEARS - AXIAL TOTAL \n'
' ID. LENGTH PLANE 1 PLANE 2 PLANE 1 PLANE 2 FORCE TORQUE \n'
#' 111001003 0.667 0.000000E+00 5.328967E+04 0.000000E+00 -1.872484E+05 0.000000E+00 0.000000E+00'
#' 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00'
#' 111001004 1.000 0.000000E+00 -1.598690E+05 0.000000E+00 1.040952E+06 0.000000E+00 0.000000E+00'
#' 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00'
#' C O M P L E X S T R A I N S I N P - V E R S I O N B E A M E L E M E N T S ( B E A M )\n'
#' (REAL/IMAGINARY)\n'
#' VU-ELEMENT ID= %9i, P-ELEMENT ID = 1, OUTPUT COORD. ID= 0, P OF EDGES = 3\n'
#'\n'
#' VUGRID VUGRID DIST/ LOCATION LOCATION LOCATION LOCATION \n'
#' ID. LENGTH C D E F \n'
#' 111001003 0.667 -2.557904E+00 -2.557904E+00 2.557904E+00 2.557904E+00'
#' 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00'
#' 111001004 1.000 7.673713E+00 7.673713E+00 -7.673713E+00 -7.673713E+00'
#' 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00'
]
if header is None:
header = []
#msg, nnodes, cen = _get_plate_msg(self)
# write the f06
ntimes = self.data.shape[0]
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
parent = self.parent_coord[:, 0]
coord = self.parent_coord[:, 1]
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
#[xxb, force_x, shear_y, shear_z, torsion, bending_y, bending_z]
xxb = self.data[itime, :, 0]
fx = self.data[itime, :, 1]
fy = self.data[itime, :, 2]
fz = self.data[itime, :, 3]
mx = self.data[itime, :, 4]
my = self.data[itime, :, 5]
mz = self.data[itime, :, 6]
for (i, eid, parenti, coordi, nid, xxbi, fxi, fyi, fzi, mxi, myi, mzi) in zip(
cycle(range(2)), eids, parent, coord, nids, xxb, fx, fy, fz, mx, my, mz):
if i == 0:
f06_file.write(''.join(header + msg) % (eid, parenti, coordi))
#out = write_imag_floats_13e([fxi, fyi, fzi, mxi, myi, mzi], is_mag_phase=is_mag_phase)
#[fxri, fyri, fzri, mxri, myri, mzri,
#fxii, fyii, fzii, mxii, myii, mzii] = out
# nid xxb
f06_file.write(
' %9i %.3f %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E\n'
' %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E\n' % (
nid, xxbi.real,
myi.real, mzi.real, fyi.real, fzi.real, fxi.real, mxi.real,
myi.imag, mzi.imag, fyi.imag, fzi.imag, fxi.imag, mxi.imag,
))
# stress/strain
#f06_file.write(
#' %9i %.3s %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E\n'
#' %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E\n' % (
#nid, xxbi.real,
#fxi.real, fyi.real, fzi.real, mxi.real, myi.real, mzi.real,
#fxi.imag, fyi.imag, fzi.imag, mxi.imag, myi.imag, mzi.imag,
#))
if i == 1:
f06_file.write(page_stamp % page_num + '\n')
page_num += 1
return page_num - 1
class ComplexForceVU_2DArray(BaseElement): # 189-VUQUAD,190-VUTRIA
def __init__(self, data_code, is_sort1, isubcase, dt):
BaseElement.__init__(self, data_code, isubcase)
#self.parent = {}
#self.coord = {}
#self.icord = {}
#self.theta = {}
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
self.ntimes = 0
# TODO if dt=None, handle SORT1 case
self.dt = dt
#if is_sort1:
#if dt is not None:
#self.add = self.add_sort1
#else:
#assert dt is not None
#self.add = self.add_sort2
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_stats(self, short=False):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n'
% (self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n'
% (self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def build(self):
"""sizes the vectorized attributes of the ComplexCShearForceArray"""
#print('%s ntimes=%s nelements=%s ntotal=%s' % (
#self.element_type, self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.ntotal = self.nelements
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element_node = zeros((self.nelements, 2), dtype='int32')
#[membrane_x, membrane_y, membrane_xy, bending_x, bending_y, bending_xy,
# shear_yz, shear_xz]
        self.data = zeros((self.ntimes, self.nelements, 8), dtype='complex64')
# coding: utf-8
# In[1]:
# import the packages
import numpy as np
from scipy.misc import imread, imresize, imsave  # note: removed from scipy.misc in modern SciPy; imageio / skimage (imported below) provide replacements
import matplotlib.pyplot as plt
from numpy import matlib
import math
from scipy import stats
import imageio
from skimage.transform import resize
import skimage
import zlib, sys
import gzip
import matplotlib
import scipy
import copy
import random
import numpy
import sympy as sp
# In[2]:
# define a function to covert the image to a gray scale image
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
# In[3]:
L0=0.99436891104358
L1=0.41984465132951
L2=-0.17677669529664
L3=-0.06629126073624
L4=0.03314563036812
H0=-0.70710678118655
H1=0.35355339059327
H2=0
H3=0
H4=0
def CDF(N):
    TA = np.zeros((N, N+8))
"""No-UI PlotOptiX raytracer (output to numpy array only).
https://github.com/rnd-team-dev/plotoptix/blob/master/LICENSE.txt
Have a look at examples on GitHub: https://github.com/rnd-team-dev/plotoptix.
"""
import json, math, logging, os, threading, time
import numpy as np
from ctypes import byref, c_ubyte, c_float, c_uint, c_int, c_longlong
from typing import List, Tuple, Callable, Optional, Union, Any
from plotoptix.singleton import Singleton
from plotoptix.geometry import GeometryMeta
from plotoptix._load_lib import load_optix, PARAM_NONE_CALLBACK, PARAM_INT_CALLBACK
from plotoptix.utils import _make_contiguous_vector, _make_contiguous_3d
from plotoptix.enums import *
class NpOptiX(threading.Thread, metaclass=Singleton):
"""No-UI raytracer, output to numpy array only.
Base, headless interface to the `RnD.SharpOptiX` raytracing engine. Provides
infrastructure for running the raytracing and compute threads and exposes
their callbacks to the user. Outputs raytraced image to numpy array.
    In derived UI classes, implement in overridden methods:
- start and run UI event loop in: :meth:`plotoptix.NpOptiX._run_event_loop`
- raise UI close event in: :meth:`plotoptix.NpOptiX.close`
- update image in UI in: :meth:`plotoptix.NpOptiX._launch_finished_callback`
- optionally apply UI edits in: :meth:`plotoptix.NpOptiX._scene_rt_starting_callback`
Parameters
----------
src : string or dict, optional
Scene description, file name or dictionary. Empty scene is prepared
if the default ``None`` value is used.
on_initialization : callable or list, optional
Callable or list of callables to execute upon starting the raytracing
thread. These callbacks are executed on the main thread.
on_scene_compute : callable or list, optional
Callable or list of callables to execute upon starting the new frame.
Callbacks are executed in a thread parallel to the raytracing.
on_rt_completed : callable or list, optional
Callable or list of callables to execute when the frame raytracing
is completed (execution may be paused with pause_compute() method).
Callbacks are executed in a thread parallel to the raytracing.
on_launch_finished : callable or list, optional
Callable or list of callables to execute when the frame raytracing
is completed. These callbacks are executed on the raytracing thread.
on_rt_accum_done : callable or list, optional
Callable or list of callables to execute when the last accumulation
frame is finished. These callbacks are executed on the raytracing thread.
width : int, optional
Pixel width of the raytracing output. Default value is 16.
height : int, optional
Pixel height of the raytracing output. Default value is 16.
start_now : bool, optional
Start raytracing thread immediately. If set to ``False``, then user should
call ``start()`` method. Default is ``False``.
devices : list, optional
List of selected devices, with the primary device at index 0. Empty list
is default, resulting with all compatible devices selected for processing.
log_level : int or string, optional
Log output level. Default is ``WARN``.
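Examples
--------
A minimal headless session (a sketch; the empty default scene is used and
the default camera is created on start):

>>> optix = NpOptiX(width=640, height=480)
>>> optix.start()
>>> # ...setup geometry and lights, read output with get_rt_output()...
>>> optix.close()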
"""
_img_rgba = None
"""Ray-tracing output, 8bps color.
Shape: ``(height, width, 4)``, ``dtype = np.uint8``, contains RGBA data
(alpha channel is now constant, ``255``).
An ndarray wrapped around the GPU buffer. It enables reading the image with no
additional memory copy. Access the buffer in the ``on_launch_finished`` callback
or in/after the ``on_rt_accum_done`` callback to avoid reading while the buffer
content is being updated.
"""
_raw_rgba = None
"""Ray-tracing output, raw floating point data.
Shape: ``(height, width, 4)``, ``dtype = np.float32``, contains RGBA data
(alpha channel is now constant, ``1.0``).
An ndarray wrapped around the GPU buffer. It enables reading the image with no
additional memory copy. Access the buffer in the ``on_launch_finished`` callback
or in/after the ``on_rt_accum_done`` callback to avoid reading while the buffer
content is being updated.
"""
_hit_pos = None
"""Hit position.
Shape: ``(height, width, 4)``, ``dtype = np.float32``, contains ``[X, Y, Z, D]`` data, where
``XYZ`` is the hit 3D position and ``D`` is the hit distance to the camera plane.
"""
_geo_id = None
"""Object info.
Encodes the object handle and primitive index (or vertex/face index for meshes)
for each pixel in the output image.
Shape: ``(height, width, 2)``, ``dtype = np.int32``, contains:
- ``_geo_id[h, w, 0] = handle | (vtx_id << 30)``, where ``handle`` is the object
handle, ``vtx_id`` is the vertex index for the triangular face that was hit
(values are ``0``, ``1``, ``2``);
- ``_geo_id[h, w, 1] = prim_idx``, where ``prim_idx`` is the primitive index in
a data set, or face index of a mesh.
"""
_albedo = None
"""Surface albedo.
Shape: ``(height, width, 4)``, ``dtype = np.float32``, contains RGBA data
(alpha channel is now constant, ``0.0``).
Available when denoiser is enabled (:attr:`plotoptix.enums.Postprocessing.Denoiser`),
and set to :attr:`plotoptix.enums.DenoiserKind.RgbAlbedo`
or :attr:`plotoptix.enums.DenoiserKind.RgbAlbedoNormal` mode, or when ``save_albedo``
parameter is set to ``True`` (see :meth:`plotoptix.NpOptiX.set_param`).
"""
_normal = None
"""Surface normal.
Shape: ``(height, width, 4)``, ``dtype = np.float32``, contains XYZ0 data
(the 4th channel is constant, ``0.0``).
Surface normal vector in camera space. Available only when the denoiser is enabled
(:attr:`plotoptix.enums.Postprocessing.Denoiser`), and set to
:attr:`plotoptix.enums.DenoiserKind.RgbAlbedoNormal` mode, or when ``save_normals``
parameter is set to ``True`` (see :meth:`plotoptix.NpOptiX.set_param`).
"""
def __init__(self,
src: Optional[Union[str, dict]] = None,
on_initialization = None,
on_scene_compute = None,
on_rt_completed = None,
on_launch_finished = None,
on_rt_accum_done = None,
width: int = -1,
height: int = -1,
start_now: bool = False,
devices: List = [],
log_level: Union[int, str] = logging.WARN) -> None:
"""NpOptiX constructor.
"""
super().__init__()
self._raise_on_error = False
self._logger = logging.getLogger(__name__ + "-NpOptiX")
self._logger.setLevel(log_level)
self._started_event = threading.Event()
self._padlock = threading.RLock()
self._is_scene_created = False
self._is_started = False
self._is_closed = False
rt_log = 0
if isinstance(log_level, int):
if log_level == logging.ERROR: rt_log = 1
elif log_level == logging.WARNING: rt_log = 2
elif log_level == logging.INFO: rt_log = 3
elif log_level == logging.DEBUG: rt_log = 4
if isinstance(log_level, str):
if log_level == 'ERROR': rt_log = 1
elif log_level == 'WARNING': rt_log = 2
elif log_level == 'WARN': rt_log = 2
elif log_level == 'INFO': rt_log = 3
elif log_level == 'DEBUG': rt_log = 4
# load SharpOptiX library, configure paths ####################
self._logger.info("Configure RnD.SharpOptiX library...")
self._optix = load_optix()
self._logger.info("...done.")
###############################################################
# setup SharpOptiX interface ##################################
self._logger.info("Preparing empty scene...")
self._width = 0
self._height = 0
if width < 1: width = 1
if height < 1: height = 1
self.resize(width, height)
self.geometry_data = {} # geometry name to metadata dictionary
self.geometry_names = {} # geometry handle to name dictionary
self.camera_handles = {} # camera name to handle dictionary
self.camera_names = {} # camera handle to name dictionary
self.light_handles = {} # light name to handle dictionary
self.light_names = {} # light handle to name dictionary
# scene initialization / compute / upload / accumulation done callbacks:
if on_initialization is not None: self._initialization_cb = self._make_list_of_callable(on_initialization)
elif src is None: self._initialization_cb = [self._default_initialization]
else: self._initialization_cb = []
self.set_scene_compute_cb(on_scene_compute)
self.set_rt_completed_cb(on_rt_completed)
self.set_rt_starting_cb(cb=None)
self.set_launch_finished_cb(on_launch_finished)
self.set_accum_done_cb(on_rt_accum_done)
device_ptr = 0
device_count = 0
if len(devices) > 0:
self._logger.info("Configure selected devices.")
device_idx = [int(d) for d in devices]
device_idx = np.ascontiguousarray(device_idx, dtype=np.int32)
device_ptr = device_idx.ctypes.data
device_count = device_idx.shape[0]
if src is None: # create empty scene
self._logger.info(" - ray-tracer initialization")
self._is_scene_created = self._optix.create_empty_scene(self._width, self._height, device_ptr, device_count, rt_log)
if self._is_scene_created: self._logger.info("Empty scene ready.")
elif isinstance(src, str): # create scene from file
if not os.path.isfile(src):
msg = "File %s not found." % src
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
wd = os.getcwd()
if os.path.isabs(src):
d, f = os.path.split(src)
os.chdir(d)
else: f = src
self._is_scene_created = self._optix.create_scene_from_file(f, self._width, self._height, device_ptr, device_count)
self._is_scene_created &= self._init_scene_metadata()
if self._is_scene_created:
self._logger.info("Scene loaded correctly.")
os.chdir(wd)
elif isinstance(src, dict): # create scene from dictionary
s = json.dumps(src)
self._is_scene_created = self._optix.create_scene_from_json(s, self._width, self._height, device_ptr, device_count)
self._is_scene_created &= self._init_scene_metadata()
if self._is_scene_created: self._logger.info("Scene loaded correctly.")
else:
msg = "Scene source type not supported."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
if self._is_scene_created:
# optionally start raytracing thread:
if start_now: self.start()
else: self._logger.info("Use start() to start raytracing.")
else:
msg = "Initial setup failed, see errors above."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
###############################################################
def __del__(self):
"""Release resources.
"""
if self._is_scene_created and not self._is_closed:
if self._is_started: self.close()
else: self._optix.destroy_scene()
def get_gpu_architecture(self, ordinal: int) -> Optional[GpuArchitecture]:
"""Get SM architecture of selected GPU.
Returns architecture of selected GPU.
Parameters
----------
ordinal : int
CUDA ordinal of the GPU.
Returns
-------
out : GpuArchitecture or None
SM architecture or ``None`` if not recognized.
See Also
--------
:py:mod:`plotoptix.enums.GpuArchitecture`
"""
cfg = self._optix.get_n_gpu_architecture(ordinal)
if cfg >= 0: return GpuArchitecture(cfg)
else: return None
def _make_list_of_callable(self, items) -> List[Callable[["NpOptiX"], None]]:
if callable(items): return [items]
else:
for item in items:
assert callable(item), "Expected callable or list of callable items."
return items
def start(self) -> None:
"""Start the raytracing, compute, and UI threads.
Actions provided with ``on_initialization`` parameter of NpOptiX
constructor are executed by this method on the main thread,
before starting the raytracing thread.
"""
if self._is_closed:
self._logger.warn("Raytracing output was closed, cannot re-open.")
return
if self._is_started:
self._logger.warn("Raytracing output already running.")
return
for c in self._initialization_cb: c(self)
self._logger.info("Initialization done.")
self._optix.start_rt()
self._logger.info("RT loop ready.")
super().start()
if self._started_event.wait(600):
self._logger.info("Raytracing started.")
self._is_started = True
else:
msg = "Raytracing output startup timed out."
self._logger.error(msg)
self._is_started = False
if self._raise_on_error: raise TimeoutError(msg)
def update_device_buffers(self):
"""Update buffer pointers.
Use after changing denoiser mode since albedo and normal
buffer wrappers are not updated automatically.
"""
c_buf = c_longlong()
c_len = c_int()
r_buf = c_longlong()
r_len = c_int()
h_buf = c_longlong()
h_len = c_int()
g_buf = c_longlong()
g_len = c_int()
a_buf = c_longlong()
a_len = c_int()
n_buf = c_longlong()
n_len = c_int()
if self._optix.get_device_buffers(
byref(c_buf), byref(c_len),
byref(r_buf), byref(r_len),
byref(h_buf), byref(h_len),
byref(g_buf), byref(g_len),
byref(a_buf), byref(a_len),
byref(n_buf), byref(n_len)):
buf = (((c_ubyte * 4) * self._width) * self._height).from_address(c_buf.value)
self._img_rgba = np.ctypeslib.as_array(buf)
buf = (((c_float * 4) * self._width) * self._height).from_address(r_buf.value)
self._raw_rgba = np.ctypeslib.as_array(buf)
buf = (((c_float * 4) * self._width) * self._height).from_address(h_buf.value)
self._hit_pos = np.ctypeslib.as_array(buf)
buf = (((c_uint * 2) * self._width) * self._height).from_address(g_buf.value)
self._geo_id = np.ctypeslib.as_array(buf)
if a_len.value > 0:
buf = (((c_float * 4) * self._width) * self._height).from_address(a_buf.value)
self._albedo = np.ctypeslib.as_array(buf)
else: self._albedo = None
if n_len.value > 0:
buf = (((c_float * 4) * self._width) * self._height).from_address(n_buf.value)
self._normal = np.ctypeslib.as_array(buf)
else: self._normal = None
else:
msg = "Image buffers setup failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def run(self):
"""Starts UI event loop.
Derived from `threading.Thread <https://docs.python.org/3/library/threading.html>`__.
Use :meth:`plotoptix.NpOptiX.start` to perform complete initialization.
**Do not override**, use :meth:`plotoptix.NpOptiX._run_event_loop` instead.
"""
assert self._is_scene_created, "Scene is not ready, see initialization messages."
c_buf = c_longlong()
c_len = c_int()
r_buf = c_longlong()
r_len = c_int()
h_buf = c_longlong()
h_len = c_int()
g_buf = c_longlong()
g_len = c_int()
a_buf = c_longlong()
a_len = c_int()
n_buf = c_longlong()
n_len = c_int()
if self._optix.resize_scene(self._width, self._height,
byref(c_buf), byref(c_len),
byref(r_buf), byref(r_len),
byref(h_buf), byref(h_len),
byref(g_buf), byref(g_len),
byref(a_buf), byref(a_len),
byref(n_buf), byref(n_len)):
buf = (((c_ubyte * 4) * self._width) * self._height).from_address(c_buf.value)
self._img_rgba = np.ctypeslib.as_array(buf)
buf = (((c_float * 4) * self._width) * self._height).from_address(r_buf.value)
self._raw_rgba = np.ctypeslib.as_array(buf)
buf = (((c_float * 4) * self._width) * self._height).from_address(h_buf.value)
self._hit_pos = np.ctypeslib.as_array(buf)
buf = (((c_uint * 2) * self._width) * self._height).from_address(g_buf.value)
self._geo_id = np.ctypeslib.as_array(buf)
if a_len.value > 0:
buf = (((c_float * 4) * self._width) * self._height).from_address(a_buf.value)
self._albedo = np.ctypeslib.as_array(buf)
else: self._albedo = None
if n_len.value > 0:
buf = (((c_float * 4) * self._width) * self._height).from_address(n_buf.value)
self._normal = np.ctypeslib.as_array(buf)
else: self._normal = None
else:
msg = "Image buffers setup failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
c1_ptr = self._get_launch_finished_callback()
r1 = self._optix.register_launch_finished_callback(c1_ptr)
c2_ptr = self._get_accum_done_callback()
r2 = self._optix.register_accum_done_callback(c2_ptr)
c3_ptr = self._get_scene_rt_starting_callback()
r3 = self._optix.register_scene_rt_starting_callback(c3_ptr)
c4_ptr = self._get_start_scene_compute_callback()
r4 = self._optix.register_start_scene_compute_callback(c4_ptr)
c5_ptr = self._get_scene_rt_completed_callback()
r5 = self._optix.register_scene_rt_completed_callback(c5_ptr)
if r1 & r2 & r3 & r4 & r5: self._logger.info("Callbacks registered.")
else:
msg = "Callbacks setup failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
self._run_event_loop()
###########################################################################
def _run_event_loop(self):
"""Internal method for running the UI event loop.
This method should be overridden in a derived UI class (but **do not call
this base implementation**).
Remember to set self._started_event after all your UI initialization.
"""
self._started_event.set()
while not self._is_closed: time.sleep(0.5)
###########################################################################
###########################################################################
def close(self) -> None:
"""Stop the raytracing thread, release resources.
Raytracing cannot be restarted after this method is called.
Override in UI class, call this base implementation (or raise a close
event for your UI and call this base implementation there).
"""
assert not self._is_closed, "Raytracing output already closed."
assert self._is_started, "Raytracing output not yet running."
with self._padlock:
self._logger.info("Stopping raytracing output.")
self._is_scene_created = False
self._is_started = False
self._optix.stop_rt()
self._optix.destroy_scene()
self._is_closed = True
###########################################################################
def is_started(self) -> bool: return self._is_started
def is_closed(self) -> bool: return self._is_closed
def get_rt_output(self,
bps: Union[ChannelDepth, str] = ChannelDepth.Bps8,
channels: Union[ChannelOrder, str] = ChannelOrder.RGBA) -> Optional[np.ndarray]:
"""Return a copy of the output image.
The image data type is specified with the ``bps`` argument. 8 bit per channel data,
``numpy.uint8``, is returned by default. Use ``Bps16`` value to read the image in
16 bit per channel depth, ``numpy.uint16``. Use ``Bps32`` value to read the HDR image
in 32 bit per channel format, ``numpy.float32``.
If the channel ordering includes an alpha channel, it is a constant, 100% opaque value,
reserved for future releases.
Safe to call at any time, from any thread.
Parameters
----------
bps : ChannelDepth enum or string, optional
Color depth.
channels : ChannelOrder enum or string, optional
Color channels ordering.
Returns
-------
out : ndarray
RGB(A) array of shape (height, width, 3) or (height, width, 4) and type corresponding
to ``bps`` argument. ``None`` in case of errors.
See Also
--------
:class:`plotoptix.enums.ChannelDepth`, :class:`plotoptix.enums.ChannelOrder`
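Examples
--------
A sketch of reading the image at two different depths:

>>> img8 = optix.get_rt_output()                              # uint8 RGBA
>>> hdr = optix.get_rt_output(bps="Bps32", channels="RGB")    # float32 RGB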
"""
assert self._is_started, "Raytracing output not running."
if isinstance(bps, str): bps = ChannelDepth[bps]
if isinstance(channels, str): channels = ChannelOrder[channels]
a = None
try:
self._padlock.acquire()
if bps == ChannelDepth.Bps8 and channels == ChannelOrder.RGBA:
if self._img_rgba is not None:
return self._img_rgba.copy()
else:
a = np.ascontiguousarray(np.zeros((self._height, self._width, 4), dtype=np.uint8))
if bps == ChannelDepth.Bps8:
if channels == ChannelOrder.BGRA:
a = np.ascontiguousarray(np.zeros((self._height, self._width, 4), dtype=np.uint8))
elif channels == ChannelOrder.RGB or channels == ChannelOrder.BGR:
a = np.ascontiguousarray(np.zeros((self._height, self._width, 3), dtype=np.uint8))
elif bps == ChannelDepth.Bps16:
if channels == ChannelOrder.RGBA or channels == ChannelOrder.BGRA:
a = np.ascontiguousarray(np.zeros((self._height, self._width, 4), dtype=np.uint16))
elif channels == ChannelOrder.RGB or channels == ChannelOrder.BGR:
a = np.ascontiguousarray(np.zeros((self._height, self._width, 3), dtype=np.uint16))
elif bps == ChannelDepth.Bps32:
if channels == ChannelOrder.RGBA or channels == ChannelOrder.BGRA:
a = np.ascontiguousarray(np.zeros((self._height, self._width, 4), dtype=np.float32))
elif channels == ChannelOrder.RGB or channels == ChannelOrder.BGR:
a = np.ascontiguousarray(np.zeros((self._height, self._width, 3), dtype=np.float32))
else: return a
if not self._optix.get_output(a.ctypes.data, a.nbytes, bps.value, channels.value):
msg = "Image not copied."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
return a
def resize(self, width: Optional[int] = None, height: Optional[int] = None) -> None:
"""Change dimensions of the raytracing output.
Both or one of the dimensions may be provided. No effect if width and height
are the same as the current output.
Parameters
----------
width : int, optional
New width of the raytracing output.
height : int, optional
New height of the raytracing output.
"""
if width is None: width = self._width
if height is None: height = self._height
if (width == self._width) and (height == self._height): return
with self._padlock:
self._width = width
self._height = height
# resize the scene, update gpu memory address
c_buf = c_longlong()
c_len = c_int()
r_buf = c_longlong()
r_len = c_int()
h_buf = c_longlong()
h_len = c_int()
g_buf = c_longlong()
g_len = c_int()
a_buf = c_longlong()
a_len = c_int()
n_buf = c_longlong()
n_len = c_int()
if self._optix.resize_scene(self._width, self._height,
byref(c_buf), byref(c_len),
byref(r_buf), byref(r_len),
byref(h_buf), byref(h_len),
byref(g_buf), byref(g_len),
byref(a_buf), byref(a_len),
byref(n_buf), byref(n_len)):
buf = (((c_ubyte * 4) * self._width) * self._height).from_address(c_buf.value)
#buf_from_mem = ctypes.pythonapi.PyMemoryView_FromMemory
#buf_from_mem.restype = ctypes.py_object
#buf_from_mem.argtypes = (ctypes.c_void_p, ctypes.c_int, ctypes.c_int)
#buf = buf_from_mem(c_buf.value, c_len.value, 0x100)
self._img_rgba = np.ctypeslib.as_array(buf)
#self._img_rgba = np.ndarray((height, width, 4), np.uint8, buf, order='C')
#print(self._img_rgba.shape, self._img_rgba.__array_interface__)
#print(self._img_rgba[int(height/2), int(width/2)])
buf = (((c_float * 4) * self._width) * self._height).from_address(r_buf.value)
self._raw_rgba = np.ctypeslib.as_array(buf)
buf = (((c_float * 4) * self._width) * self._height).from_address(h_buf.value)
self._hit_pos = np.ctypeslib.as_array(buf)
buf = (((c_uint * 2) * self._width) * self._height).from_address(g_buf.value)
self._geo_id = np.ctypeslib.as_array(buf)
if a_len.value > 0:
buf = (((c_float * 4) * self._width) * self._height).from_address(a_buf.value)
self._albedo = np.ctypeslib.as_array(buf)
else: self._albedo = None
if n_len.value > 0:
buf = (((c_float * 4) * self._width) * self._height).from_address(n_buf.value)
self._normal = np.ctypeslib.as_array(buf)
else: self._normal = None
else:
self._img_rgba = None
self._hit_pos = None
self._geo_id = None
self._albedo = None
self._normal = None
@staticmethod
def _default_initialization(wnd) -> None:
wnd._logger.info("Default scene initialization.")
if wnd._optix.get_current_camera() == 0:
wnd.setup_camera("default", [0, 0, 10], [0, 0, 0])
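# A custom initialization callback can be passed to the constructor with the
# ``on_initialization`` parameter instead of the default above. A sketch
# (the setup_light() arguments are illustrative, not verified defaults):
#
#   def init(rt):
#       rt.setup_camera("cam1", [0, 0, 10], [0, 0, 0])
#       rt.setup_light("light1", pos=[4, 5, 5], color=[8, 7, 6], radius=1.5)
#
#   optix = NpOptiX(on_initialization=init, width=640, height=480)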
###########################################################################
def set_launch_finished_cb(self, cb) -> None:
"""Set callback function(s) executed after each finished frame.
Parameters
----------
cb : callable or list
Callable or list of callables to set as the launch finished callback.
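Examples
--------
A sketch; the callback receives this NpOptiX instance:

>>> def on_frame(rt):
...     img = rt._img_rgba.copy()  # copy only, do heavy processing elsewhere
>>> optix.set_launch_finished_cb(on_frame)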
"""
with self._padlock:
if cb is not None: self._launch_finished_cb = self._make_list_of_callable(cb)
else: self._launch_finished_cb = []
def _launch_finished_callback(self, rt_result: int) -> None:
"""
Callback executed after each finished frame (``min_accumulation_step``
accumulation frames are raytraced together). This callback is
executed in the raytracing thread and should not compute extensively
(get/save the image data here but calculate scene etc in another thread).
Override this method in the UI class, call this base implementation
and update image in UI (or raise an event to do so).
Actions provided with ``on_launch_finished`` parameter of NpOptiX
constructor are executed here.
Parameters
----------
rt_result : int
Raytracing result code corresponding to :class:`plotoptix.enums.RtResult`.
"""
if self._is_started:
if rt_result < RtResult.NoUpdates.value:
#self._logger.info("Launch finished.")
with self._padlock:
for c in self._launch_finished_cb: c(self)
def _get_launch_finished_callback(self):
def func(rt_result: int): self._launch_finished_callback(rt_result)
return PARAM_INT_CALLBACK(func)
###########################################################################
###########################################################################
def set_rt_starting_cb(self, cb) -> None:
"""Set callback function(s) executed before each frame raytracing.
Parameters
----------
cb : callable or list
Callable or list of callables to set as the rt starting callback.
"""
with self._padlock:
if cb is not None: self._rt_starting_cb = self._make_list_of_callable(cb)
else: self._rt_starting_cb = []
def _scene_rt_starting_callback(self) -> None:
"""
Callback executed before starting frame raytracing. Appropriate to
override in UI class and apply scene edits (or raise an event to do
so) like camera rotations, etc. made by a user in UI.
This callback is executed in the raytracing thread and should not
compute extensively.
"""
for c in self._rt_starting_cb: c(self)
def _get_scene_rt_starting_callback(self):
def func(): self._scene_rt_starting_callback()
return PARAM_NONE_CALLBACK(func)
###########################################################################
###########################################################################
def set_accum_done_cb(self, cb) -> None:
"""Set callback function(s) executed when all accumulation frames
are completed.
Parameters
----------
cb : callable or list
Callable or list of callables to set as the accum done callback.
"""
with self._padlock:
if cb is not None: self._rt_accum_done_cb = self._make_list_of_callable(cb)
else: self._rt_accum_done_cb = []
def _accum_done_callback(self) -> None:
"""
Callback executed when all accumulation frames are completed.
**Do not override**, it is intended to launch ``on_rt_accum_done``
actions provided with NpOptiX constructor parameters.
Executed in the raytracing thread, so do not compute or write files
(make a copy of the image data and process it in another thread).
"""
if self._is_started:
self._logger.info("RT accumulation finished.")
with self._padlock:
for c in self._rt_accum_done_cb: c(self)
def _get_accum_done_callback(self):
def func(): self._accum_done_callback()
return PARAM_NONE_CALLBACK(func)
###########################################################################
###########################################################################
def set_scene_compute_cb(self, cb) -> None:
"""Set callback function(s) executed on each frame ray tracing start.
Callback(s) executed in parallel to the raytracing and intended for
CPU intensive computations. Note, set ``compute_timeout`` to appropriate
value if your computations are longer than single frame ray tracing, see
:meth:`plotoptix.NpOptiX.set_param`.
Parameters
----------
cb : callable or list
Callable or list of callables to set as the scene compute callback.
"""
with self._padlock:
if cb is not None: self._scene_compute_cb = self._make_list_of_callable(cb)
else: self._scene_compute_cb = []
def _start_scene_compute_callback(self, n_frames : int) -> None:
"""
Compute callback executed together with the start of each frame raytracing.
This callback is executed in parallel to the raytracing and is intended
for CPU intensive computations. Do not set, update data, cameras, lights,
etc. here, as it will block until the end of raytracing in the parallel
thread.
Callback execution can be suspended / resumed with :meth:`plotoptix.NpOptiX.pause_compute` /
:meth:`plotoptix.NpOptiX.resume_compute` methods.
**Do not override**, this method is intended to launch ``on_scene_compute``
actions provided with NpOptiX constructor parameters.
Parameters
----------
n_frames : int
Number of the raytraced frames since the last call (excluding paused
cycles).
"""
if self._is_started:
self._logger.info("Compute, delta %d frames.", n_frames)
for c in self._scene_compute_cb: c(self, n_frames)
def _get_start_scene_compute_callback(self):
def func(n_frames : int): self._start_scene_compute_callback(n_frames)
return PARAM_INT_CALLBACK(func)
def set_rt_completed_cb(self, cb) -> None:
"""Set callback function(s) executed on each frame ray tracing finished.
Callback(s) executed in the same thread as the scene compute callback. Note,
set ``compute_timeout`` to appropriate value if your computations are longer
than single frame ray tracing, see :meth:`plotoptix.NpOptiX.set_param`.
Parameters
----------
cb : callable or list
Callable or list of callables to set as the RT completed callback.
"""
with self._padlock:
if cb is not None: self._rt_completed_cb = self._make_list_of_callable(cb)
else: self._rt_completed_cb = []
def _scene_rt_completed_callback(self, rt_result : int) -> None:
"""
Callback executed in the same thread as _start_scene_compute_callback,
after it finishes computations.
This callback is synchronized also with the raytracing thread and should
be used for any uploads of the updated scene to GPU: data, cameras, lights
setup or updates. Image updates in UI are also possible here, but note that
callback execution can be suspended / resumed with pause_compute() /
resume_compute() methods.
**Do not override**, this method is intended to launch on_rt_completed
actions provided with __init__ method parameters.
Parameters
----------
rt_result : int
Raytracing result code corresponding to RtResult enum.
"""
if self._is_started and rt_result <= RtResult.NoUpdates.value:
self._logger.info("RT completed, result %s.", RtResult(rt_result))
for c in self._rt_completed_cb: c(self)
def _get_scene_rt_completed_callback(self):
def func(rt_result : int): self._scene_rt_completed_callback(rt_result)
return PARAM_INT_CALLBACK(func)
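# Typical use of the compute callbacks (a sketch): do CPU-heavy work in
# ``on_scene_compute`` and upload the results to the GPU in ``on_rt_completed``.
# The ``simulate`` function and the ``update_data`` call are illustrative only.
#
#   def compute(rt, n_frames):
#       rt.new_pos = simulate(n_frames)          # runs in parallel to raytracing
#
#   def upload(rt):
#       rt.update_data("plot", pos=rt.new_pos)   # synchronized with raytracing
#
#   optix = NpOptiX(on_scene_compute=compute, on_rt_completed=upload)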
###########################################################################
def pause_compute(self) -> None:
"""Suspend execution of ``on_scene_compute`` / ``on_rt_completed`` actions.
"""
if self._optix.set_compute_paused(True):
self._logger.info("Compute thread paused.")
else:
self._logger.warn("Pausing compute thread had no effect.")
def resume_compute(self) -> None:
"""Resume execution of ``on_scene_compute`` / ``on_rt_completed actions``.
"""
if self._optix.set_compute_paused(False):
self._logger.info("Compute thread resumed.")
else:
msg = "Resuming compute thread had no effect."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def refresh_scene(self) -> None:
"""Refresh scene
Starts raytracing accumulation from scratch.
"""
self._optix.refresh_scene()
def get_float(self, name: str) -> Optional[float]:
"""Get shader ``float`` variable with given ``name``.
Parameters
----------
name : string
Variable name.
Returns
-------
out : float
Value of the variable or ``None`` if variable not found.
"""
if not isinstance(name, str): name = str(name)
c_x = c_float()
if self._optix.get_float(name, byref(c_x)):
self._logger.info("Variable float %s = %f", name, c_x.value)
return c_x.value
else:
msg = "Variable float %s not found." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None
def get_float2(self, name: str) -> (Optional[float], Optional[float]):
"""Get shader ``float2`` variable with given ``name``.
Parameters
----------
name : string
Variable name.
Returns
-------
out : tuple (float, float)
Value (x, y) of the variable or ``(None, None)`` if variable not found.
"""
if not isinstance(name, str): name = str(name)
c_x = c_float()
c_y = c_float()
if self._optix.get_float2(name, byref(c_x), byref(c_y)):
self._logger.info("Variable float2 %s = (%f, %f)", name, c_x.value, c_y.value)
return c_x.value, c_y.value
else:
msg = "Variable float2 %s not found." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None, None
def get_float3(self, name: str) -> (Optional[float], Optional[float], Optional[float]):
"""Get shader ``float3`` variable with given ``name``.
Parameters
----------
name : string
Variable name.
Returns
-------
out : tuple (float, float, float)
Value (x, y, z) of the variable or ``(None, None, None)`` if variable not found.
"""
if not isinstance(name, str): name = str(name)
c_x = c_float()
c_y = c_float()
c_z = c_float()
if self._optix.get_float3(name, byref(c_x), byref( c_y), byref(c_z)):
self._logger.info("Variable float3 %s = (%f, %f, %f)", name, c_x.value, c_y.value, c_z.value)
return c_x.value, c_y.value, c_z.value
else:
msg = "Variable float3 %s not found." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None, None, None
def set_float(self, name: str, x: float, y: Optional[float] = None, z: Optional[float] = None, refresh: bool = False) -> None:
"""Set shader variable.
Set shader variable with given ``name`` and of the type ``float``, ``float2``
(if y provided), or ``float3`` (if y and z provided). Raytrace the whole
scene if refresh is set to ``True``.
Parameters
----------
name : string
Variable name.
x : float
Variable value (x component in case of ``float2`` and ``float3``).
y : float, optional
Y component value for ``float2`` and ``float3`` variables.
z : float, optional
Z component value for ``float3`` variables.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
Examples
--------
>>> optix = TkOptiX()
>>> optix.set_float("tonemap_exposure", 0.8)
>>> optix.set_float("tonemap_gamma", 2.2)
"""
if not isinstance(name, str): name = str(name)
if not isinstance(x, float): x = float(x)
if z is not None: # expect float3
if not isinstance(z, float): z = float(z)
if not isinstance(y, float): y = float(y)
self._optix.set_float3(name, x, y, z, refresh)
return
if y is not None: # expect float2
if not isinstance(y, float): y = float(y)
self._optix.set_float2(name, x, y, refresh)
return
self._optix.set_float(name, x, refresh)
def get_uint(self, name: str) -> Optional[int]:
"""Get shader ``uint`` variable with given ``name``.
Parameters
----------
name : string
Variable name.
Returns
-------
out : int
Value of the variable or ``None`` if variable not found.
"""
if not isinstance(name, str): name = str(name)
c_x = c_uint()
if self._optix.get_uint(name, byref(c_x)):
self._logger.info("Variable uint %s = %d", name, c_x.value)
return c_x.value
else:
msg = "Variable uint %s not found." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None
def get_uint2(self, name: str) -> (Optional[int], Optional[int]):
"""Get shader ``uint2`` variable with given ``name``.
Parameters
----------
name : string
Variable name.
Returns
-------
out : tuple (int, int)
Value (x, y) of the variable or ``(None, None)`` if variable not found.
"""
if not isinstance(name, str): name = str(name)
c_x = c_uint()
c_y = c_uint()
if self._optix.get_uint2(name, byref(c_x), byref(c_y)):
self._logger.info("Variable uint2 %s = (%d, %d)", name, c_x.value, c_y.value)
return c_x.value, c_y.value
else:
msg = "Variable uint2 %s not found." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None, None
def set_uint(self, name: str, x: int, y: Optional[int] = None, refresh: bool = False) -> None:
"""Set shader variable.
Set shader variable with given ``name`` and of the type ``uint`` or ``uint2``
(if y provided). Raytrace the whole scene if refresh is set to ``True``.
Note, shader variables distinguish ``int`` and ``uint`` while the type
provided by Python methods is ``int`` in both cases.
Parameters
----------
name : string
Variable name.
x : int
Variable value (x component in case of ``uint2``).
y : int, optional
Y component value for ``uint2`` variable.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
Examples
--------
>>> optix = TkOptiX()
>>> optix.set_uint("path_seg_range", 4, 16) # set longer range of traced path segments
"""
if not isinstance(name, str): name = str(name)
if not isinstance(x, int): x = int(x)
if y is not None: # expect uint2
if not isinstance(y, int): y = int(y)
self._optix.set_uint2(name, x, y, refresh)
return
self._optix.set_uint(name, x, refresh)
def get_int(self, name: str) -> Optional[int]:
"""Get shader ``int`` variable with given ``name``.
Parameters
----------
name : string
Variable name.
Returns
-------
out : int
Value of the variable or ``None`` if variable not found.
"""
if not isinstance(name, str): name = str(name)
c_x = c_int()
if self._optix.get_int(name, byref(c_x)):
self._logger.info("Variable int %s = %d", name, c_x.value)
return c_x.value
else:
msg = "Variable int %s not found." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None
def set_int(self, name: str, x: int, refresh: bool = False) -> None:
"""Set shader variable.
Set shader variable with given ``name`` and of the type ``int``. Raytrace
the whole scene if refresh is set to ``True``.
Note, shader variables distinguish ``int`` and ``uint`` while the type
provided by Python methods is ``int`` in both cases.
Parameters
----------
name : string
Variable name.
x : int
Variable value.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
"""
if not isinstance(name, str): name = str(name)
if not isinstance(x, int): x = int(x)
self._optix.set_int(name, x, refresh)
def set_texture_1d(self, name: str, data: Any,
addr_mode: Union[TextureAddressMode, str] = TextureAddressMode.Clamp,
keep_on_host: bool = False,
refresh: bool = False) -> None:
"""Set texture data.
Set texture ``name`` data. Texture format (float, float2, float4 or byte, byte2, byte4)
and length are deduced from the ``data`` array shape and dtype. Use ``keep_on_host=True``
to make a copy of data in the host memory (in addition to GPU memory), this
option is required when (small) textures are going to be saved to JSON description
of the scene.
Parameters
----------
name : string
Texture name.
data : array_like
Texture data.
addr_mode : TextureAddressMode or string, optional
Texture addressing mode on edge crossing.
keep_on_host : bool, optional
Store texture data copy in the host memory.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
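Examples
--------
A sketch; a (length, 4) float32 array is uploaded as a ``Float4`` texture
(the texture name is arbitrary):

>>> cmap = np.linspace([0, 0, 0, 1], [1, 1, 1, 1], 64, dtype=np.float32)
>>> optix.set_texture_1d("grayscale_map", cmap)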
"""
if not isinstance(name, str): name = str(name)
if not isinstance(data, np.ndarray): data = np.ascontiguousarray(data)
if isinstance(addr_mode, str): addr_mode = TextureAddressMode[addr_mode]
if data.dtype != np.uint8: # everything not explicitly given as uint8 is uploaded as float32
if len(data.shape) == 1: rt_format = RtFormat.Float
elif len(data.shape) == 2:
if data.shape[1] == 1: rt_format = RtFormat.Float
elif data.shape[1] == 2: rt_format = RtFormat.Float2
elif data.shape[1] == 4: rt_format = RtFormat.Float4
else:
msg = "Texture 1D shape should be (length,n), where n=1,2,4."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
else:
msg = "Texture 1D shape should be (length,) or (length,n), where n=1,2,4."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if data.dtype != np.float32: data = np.ascontiguousarray(data, dtype=np.float32)
if not data.flags['C_CONTIGUOUS']: data = np.ascontiguousarray(data, dtype=np.float32)
else:
if len(data.shape) == 1: rt_format = RtFormat.UByte
elif len(data.shape) == 2:
if data.shape[1] == 1: rt_format = RtFormat.UByte
elif data.shape[1] == 2: rt_format = RtFormat.UByte2
elif data.shape[1] == 4: rt_format = RtFormat.UByte4
else:
msg = "Texture 1D shape should be (length,n), where n=1,2,4."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
else:
msg = "Texture 1D shape should be (length,) or (length,n), where n=1,2,4."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if not data.flags['C_CONTIGUOUS']: data = np.ascontiguousarray(data, dtype=np.uint8)
self._logger.info("Set texture 1D %s: length=%d, format=%s.", name, data.shape[0], rt_format.name)
if not self._optix.set_texture_1d(name, data.ctypes.data, data.shape[0], rt_format.value, addr_mode.value, keep_on_host, refresh):
msg = "Texture 1D %s not uploaded." % name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def set_texture_2d(self, name: str, data: Any,
addr_mode: Union[TextureAddressMode, str] = TextureAddressMode.Wrap,
keep_on_host: bool = False,
refresh: bool = False) -> None:
"""Set texture data.
Set texture ``name`` data. Texture format (float, float2, float4 or byte, byte2, byte4)
and width/height are deduced from the ``data`` array shape and dtype. Use ``keep_on_host=True``
to make a copy of data in the host memory (in addition to GPU memory), this
option is required when (small) textures are going to be saved to JSON description
of the scene.
Parameters
----------
name : string
Texture name.
data : array_like
Texture data.
addr_mode : TextureAddressMode or string, optional
Texture addressing mode on edge crossing.
keep_on_host : bool, optional
Store texture data copy in the host memory.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
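Examples
--------
A sketch; a (height, width, 4) uint8 array is uploaded as a ``UByte4`` texture:

>>> tex = (255 * np.random.rand(256, 256, 4)).astype(np.uint8)
>>> optix.set_texture_2d("noise", tex)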
"""
if not isinstance(name, str): name = str(name)
if not isinstance(data, np.ndarray): data = np.ascontiguousarray(data)
if isinstance(addr_mode, str): addr_mode = TextureAddressMode[addr_mode]
if data.dtype != np.uint8: # everything not explicitly given as uint8 is uploaded as float32
if len(data.shape) == 2: rt_format = RtFormat.Float
elif len(data.shape) == 3:
if data.shape[2] == 1: rt_format = RtFormat.Float
elif data.shape[2] == 2: rt_format = RtFormat.Float2
elif data.shape[2] == 4: rt_format = RtFormat.Float4
else:
msg = "Texture 2D shape should be (height,width,n), where n=1,2,4."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
else:
msg = "Texture 2D shape should be (height,width) or (height,width,n), where n=1,2,4."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if data.dtype != np.float32: data = np.ascontiguousarray(data, dtype=np.float32)
if not data.flags['C_CONTIGUOUS']: data = np.ascontiguousarray(data, dtype=np.float32)
else: # uint8
if len(data.shape) == 2: rt_format = RtFormat.UByte
elif len(data.shape) == 3:
if data.shape[2] == 1: rt_format = RtFormat.UByte
elif data.shape[2] == 2: rt_format = RtFormat.UByte2
elif data.shape[2] == 4: rt_format = RtFormat.UByte4
else:
msg = "Texture 2D shape should be (height,width,n), where n=1,2,4."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
else:
msg = "Texture 2D shape should be (height,width) or (height,width,n), where n=1,2,4."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if not data.flags['C_CONTIGUOUS']: data = np.ascontiguousarray(data, dtype=np.uint8)
self._logger.info("Set texture 2D %s: %d x %d, format=%s.", name, data.shape[1], data.shape[0], rt_format.name)
if not self._optix.set_texture_2d(name, data.ctypes.data, data.shape[1], data.shape[0], rt_format.value, addr_mode.value, keep_on_host, refresh):
msg = "Texture 2D %s not uploaded." % name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def load_texture(self, tex_name: str, file_name: str,
rt_format: RtFormat = RtFormat.Float4,
prescale: float = 1.0,
baseline: float = 0.0,
exposure: float = 1.0,
gamma: float = 1.0,
addr_mode: Union[TextureAddressMode, str] = TextureAddressMode.Wrap,
keep_on_host: bool = False,
refresh: bool = False) -> None:
"""Load texture from file.
Parameters
----------
tex_name : string
Texture name.
file_name : string
Source image file.
rt_format: RtFormat, optional
Target format of the texture.
prescale : float, optional
Scaling factor for color values.
baseline : float, optional
Baseline added to color values.
exposure : float, optional
Exposure value used in the postprocessing.
gamma : float, optional
Gamma value used in the postprocessing.
addr_mode : TextureAddressMode or string, optional
Texture addressing mode on edge crossing.
keep_on_host : bool, optional
Store texture data copy in the host memory.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
Examples
--------
>>> optix = TkOptiX()
>>> optix.load_texture("rainbow", "data/rainbow.jpg") # load texture from an image file
"""
if isinstance(rt_format, str): rt_format = RtFormat[rt_format]
if isinstance(addr_mode, str): addr_mode = TextureAddressMode[addr_mode]
if not self._optix.load_texture_2d(tex_name, file_name, prescale, baseline, exposure, gamma, rt_format.value, addr_mode.value, refresh):
msg = "Failed on reading texture from file %s." % file_name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
def set_normal_tilt(self, name: str, data: Any,
mapping: Union[TextureMapping, str] = TextureMapping.Flat,
addr_mode: Union[TextureAddressMode, str] = TextureAddressMode.Wrap,
keep_on_host: bool = False,
refresh: bool = False) -> None:
"""Set normal tilt data.
Set shading normal tilt according to displacement data for the material ``name``. The ``data``
has to be a 2D array containing displacement mapping. ``mapping`` determines how the normal tilt
is calculated from the displacement map (see :class:`plotoptix.enums.TextureMapping`).
Use ``keep_on_host=True`` to make a copy of data in the host memory (in addition to GPU
memory), this option is required when (small) arrays are going to be saved to JSON
description of the scene.
Parameters
----------
name : string
Object name.
data : array_like
Displacement map data.
mapping : TextureMapping or string, optional
Mapping mode (see :class:`plotoptix.enums.TextureMapping`).
addr_mode : TextureAddressMode or string, optional
Texture addressing mode on edge crossing.
keep_on_host : bool, optional
Store texture data copy in the host memory.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
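Examples
--------
A sketch; ``"surface"`` is a placeholder material name:

>>> x = np.linspace(0, 8 * np.pi, 256)
>>> bumps = 0.1 * np.sin(x)[None, :] * np.cos(x)[:, None]
>>> optix.set_normal_tilt("surface", bumps, mapping="Flat")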
"""
if not isinstance(name, str): name = str(name)
if not isinstance(data, np.ndarray): data = np.ascontiguousarray(data, dtype=np.float32)
if isinstance(mapping, str): mapping = TextureMapping[mapping]
if isinstance(addr_mode, str): addr_mode = TextureAddressMode[addr_mode]
if len(data.shape) != 2:
msg = "Data shape should be (height,width)."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if data.dtype != np.float32: data = np.ascontiguousarray(data, dtype=np.float32)
if not data.flags['C_CONTIGUOUS']: data = np.ascontiguousarray(data, dtype=np.float32)
self._logger.info("Set shading normal tilt map for %s: %d x %d.", name, data.shape[1], data.shape[0])
if not self._optix.set_normal_tilt(name, data.ctypes.data, data.shape[1], data.shape[0],
mapping.value, addr_mode.value, keep_on_host, refresh):
msg = "%s normal tilt map not uploaded." % name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def load_normal_tilt(self, name: str, file_name: str,
mapping: Union[TextureMapping, str] = TextureMapping.Flat,
addr_mode: Union[TextureAddressMode, str] = TextureAddressMode.Wrap,
prescale: float = 1.0,
baseline: float = 0.0,
refresh: bool = False) -> None:
"""Set normal tilt data.
Set shading normal tilt according to displacement map loaded from an image file. ``mapping``
determines how the normal tilt is calculated from the displacement data
(see :class:`plotoptix.enums.TextureMapping`). Tilt data is stored in the device memory only
(there is no host copy).
Parameters
----------
name : string
Material name.
file_name : string
Image file name with the displacement data.
mapping : TextureMapping or string, optional
Mapping mode (see :class:`plotoptix.enums.TextureMapping`).
addr_mode : TextureAddressMode or string, optional
Texture addressing mode on edge crossing.
prescale : float, optional
Scaling factor for displacement values.
baseline : float, optional
Baseline added to displacement values.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
"""
if not isinstance(name, str): name = str(name)
if not isinstance(file_name, str): file_name = str(file_name)
if isinstance(mapping, str): mapping = TextureMapping[mapping]
if isinstance(addr_mode, str): addr_mode = TextureAddressMode[addr_mode]
self._logger.info("Set shading normal tilt map for %s using %s.", name, file_name)
if not self._optix.load_normal_tilt(name, file_name, mapping.value, addr_mode.value, prescale, baseline, refresh):
msg = "%s normal tilt map not uploaded." % name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def set_displacement(self, name: str, data: Any,
addr_mode: Union[TextureAddressMode, str] = TextureAddressMode.Wrap,
keep_on_host: bool = False,
refresh: bool = False) -> None:
"""Set surface displacement data.
Set displacement data for the object ``name``. Geometry attribute program of the object
has to be set to :attr:`plotoptix.enums.GeomAttributeProgram.DisplacedSurface`. The ``data``
has to be a 2D array containing displacement map.
Use ``keep_on_host=True`` to make a copy of data in the host memory (in addition to GPU
memory), this option is required when (small) arrays are going to be saved to JSON
description of the scene.
Parameters
----------
name : string
Object name.
data : array_like
Displacement map data.
addr_mode : TextureAddressMode or string, optional
Texture addressing mode on edge crossing.
keep_on_host : bool, optional
Store texture data copy in the host memory.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
"""
if not isinstance(name, str): name = str(name)
if not isinstance(data, np.ndarray): data = np.ascontiguousarray(data, dtype=np.float32)
if isinstance(addr_mode, str): addr_mode = TextureAddressMode[addr_mode]
if len(data.shape) != 2:
msg = "Data shape should be (height,width)."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if data.dtype != np.float32: data = np.ascontiguousarray(data, dtype=np.float32)
if not data.flags['C_CONTIGUOUS']: data = np.ascontiguousarray(data, dtype=np.float32)
self._logger.info("Set displacement map for %s: %d x %d.", name, data.shape[1], data.shape[0])
if not self._optix.set_displacement(name, data.ctypes.data, data.shape[1], data.shape[0],
addr_mode.value, keep_on_host, refresh):
msg = "%s displacement map not uploaded." % name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def load_displacement(self, name: str, file_name: str,
prescale: float = 1.0,
baseline: float = 0.0,
addr_mode: Union[TextureAddressMode, str] = TextureAddressMode.Wrap,
refresh: bool = False) -> None:
"""Load surface displacement data from file.
Load displacement data for the object ``name`` from an image file. Geometry attribute
program of the object has to be set to :attr:`plotoptix.enums.GeomAttributeProgram.DisplacedSurface`.
Tilt data is stored in the device memory only (there is no host copy).
Parameters
----------
name : string
Object name.
file_name : string
Image file name with the displacement data.
prescale : float, optional
Scaling factor for displacement values.
baseline : float, optional
Baseline added to displacement values.
addr_mode : TextureAddressMode or string, optional
Texture addressing mode on edge crossing.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
"""
if not isinstance(name, str): name = str(name)
if not isinstance(file_name, str): file_name = str(file_name)
if isinstance(addr_mode, str): addr_mode = TextureAddressMode[addr_mode]
self._logger.info("Set displacement map for %s using %s.", name, file_name)
if not self._optix.load_displacement(name, file_name, prescale, baseline, addr_mode.value, refresh):
msg = "%s displacement map not uploaded." % name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def get_background_mode(self) -> Optional[MissProgram]:
"""Get currently configured miss program.
Returns
-------
out : MissProgram or None
Miss program, see :py:mod:`plotoptix.enums.MissProgram`, or
`None` if reading the mode failed.
See Also
--------
:py:mod:`plotoptix.enums.MissProgram`
"""
miss = self._optix.get_miss_program()
if miss >= 0:
mode = MissProgram(miss)
self._logger.info("Current miss program is: %s", mode.name)
return mode
else:
msg = "Failed on reading the miss program."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
return None
def set_background_mode(self, mode: Union[MissProgram, str], refresh: bool = False) -> None:
"""Set miss program.
Parameters
----------
mode : MissProgram enum or string
Miss program, see :py:mod:`plotoptix.enums.MissProgram`.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
See Also
--------
:py:mod:`plotoptix.enums.MissProgram`
"""
if isinstance(mode, str): mode = MissProgram[mode]
if self._optix.set_miss_program(mode.value, refresh):
self._logger.info("Miss program %s is selected.", mode.name)
else:
msg = "Miss program setup failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def get_background(self) -> (float, float, float):
"""Get background color.
**Note**, currently returns constant background color also in texture
based background modes.
Returns
-------
out : tuple (float, float, float)
Color values (r, g, b) of the background color.
"""
return self.get_float3("bg_color")
def set_background(self, bg: Any,
rt_format: Union[RtFormat, str] = RtFormat.Float4,
prescale: float = 1.0,
baseline: float = 0.0,
exposure: float = 1.0,
gamma: float = 1.0,
keep_on_host: bool = False,
refresh: bool = False) -> None:
"""Set background color.
Set background color or texture (shader variable ``bg_color``, texture
``bg_texture`` or ``bg_texture8`` depending on the ``rt_format``). Raytrace
the whole scene if refresh is set to ``True``. Texture should be provided as
an array of shape ``(height, width, n)``, where ``n`` is 3 or 4. 3-component
RGB arrays are extended to 4-component RGBA shape (alpha channel is reserved
for future implementations).
Function attempts to load texture from file if ``bg`` is a string.
Color values are corrected to account for the postprocessing tone
mapping if ``exposure`` and ``gamma`` values are provided.
Use ``keep_on_host=True`` to make a copy of data in the host memory (in addition
to GPU memory), this option is required when (small) textures are going to be saved
to JSON description of the scene.
Note, color components range is <0; 1>.
Parameters
----------
bg : Any
New background color or texture data; a single value is a grayscale level,
RGB color components can be provided as an array-like values, texture
is provided as an array of shape ``(height, width, n)`` or string
with the source image file path.
rt_format: RtFormat, optional
Target format of the texture.
prescale : float, optional
Scaling factor for color values.
baseline : float, optional
Baseline added to color values.
exposure : float, optional
Exposure value used in the postprocessing.
gamma : float, optional
Gamma value used in the postprocessing.
keep_on_host : bool, optional
Store texture data copy in the host memory.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
Examples
--------
>>> optix = TkOptiX()
>>> optix.set_background(0.5) # set gray background
>>> optix.set_background([0.5, 0.7, 0.9]) # set light bluish background
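A texture can be used as well (a sketch; 3-component RGB data is extended
to RGBA internally):

>>> tex = np.random.rand(270, 480, 3).astype(np.float32)
>>> optix.set_background(tex) # noise texture as background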
"""
if isinstance(rt_format, str): rt_format = RtFormat[rt_format]
if rt_format == RtFormat.Float4:
bg_name = "bg_texture"
elif rt_format == RtFormat.UByte4:
bg_name = "bg_texture8"
else:
msg = "Background texture format should be Float4 or UByte4."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
if isinstance(bg, str):
if self._optix.load_texture_2d(bg_name, bg,
prescale, baseline, exposure, gamma,
rt_format.value,
TextureAddressMode.Mirror.value,
False):
if not self._optix.set_bg_texture(bg_name, refresh):
msg = "Background texture %s not set." % bg_name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
self._logger.info("Background texture loaded from file.")
else:
msg = "Failed on reading background texture."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
e = 1 / exposure
if isinstance(bg, float) or isinstance(bg, int):
x = float(bg); x = e * np.power(x, gamma)
y = float(bg); y = e * np.power(y, gamma)
z = float(bg); z = e * np.power(z, gamma)
if self._optix.set_float3("bg_color", x, y, z, refresh):
self._logger.info("Background constant gray level updated.")
else:
msg = "Failed on updating background color."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if not isinstance(bg, np.ndarray):
bg = np.ascontiguousarray(bg)
if (len(bg.shape) == 1) and (bg.shape[0] == 3):
x = e * np.power(bg[0], gamma)
y = e * np.power(bg[1], gamma)
z = e * np.power(bg[2], gamma)
if self._optix.set_float3("bg_color", x, y, z, refresh):
self._logger.info("Background constant color updated.")
else:
msg = "Failed on updating background color."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if len(bg.shape) == 3:
if bg.shape[-1] == 3:
b = np.zeros((bg.shape[0], bg.shape[1], 4), dtype=bg.dtype)
b[...,:-1] = bg
bg = b
if bg.shape[-1] == 4:
if gamma != 1:
if bg.dtype != np.float32 and bg.dtype != np.float64:
bg = bg.astype(dtype=np.float32)
bg *= 1.0/255.0
bg = np.power(bg, gamma)
if e != 1:
if bg.dtype != np.float32 and bg.dtype != np.float64:
bg = bg.astype(dtype=np.float32)
bg *= 1.0/255.0
bg *= e
if rt_format == RtFormat.Float4 and bg.dtype != np.float32:
bg = bg.astype(dtype=np.float32)
elif rt_format == RtFormat.UByte4 and bg.dtype != np.uint8:
if bg.dtype == np.float32 or bg.dtype == np.float64:
bg *= 255.0
np.clip(bg, 0.0, 255.0, out=bg)
bg = bg.astype(dtype=np.uint8)
self.set_texture_2d(bg_name, bg, addr_mode=TextureAddressMode.Mirror, keep_on_host=keep_on_host, refresh=False)
if not self._optix.set_bg_texture(bg_name, refresh):
msg = "Background texture %s not set." % bg_name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
self._logger.info("Background texture %s updated." % bg_name)
return
msg = "Background should be a single gray level or [r,g,b] array_like or 2D array_like of [r,g,b]/[r,g,b,a] values."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
def get_ambient(self) -> (float, float, float):
"""Get ambient color.
Returns
-------
out : tuple (float, float, float)
Color values (r, g, b) of the ambient light color.
"""
return self.get_float3("ambient_color")
def set_ambient(self, color: Any, refresh: bool = False) -> None:
"""Set ambient light color.
Set ambient light color of the scene (shader variable ``ambient_color``,
default value is [0.86, 0.89, 0.94]). Raytrace the whole scene if
refresh is set to ``True``.
Note, color components range is <0; 1>.
Parameters
----------
color : Any
New ambient light color value; single value is a grayscale level,
RGB color components can be provided as array-like values.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
Examples
--------
>>> optix = TkOptiX()
>>> optix.set_ambient(0.5) # set dim gray light
>>> optix.set_ambient([0.1, 0.2, 0.3]) # set dim bluish light
"""
if isinstance(color, float) or isinstance(color, int):
x = float(color)
y = float(color)
z = float(color)
else:
if not isinstance(color, np.ndarray):
color = np.asarray(color, dtype=np.float32)
if (len(color.shape) != 1) or (color.shape[0] != 3):
msg = "Color should be a single value or 3-element array/list/tupe."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
x = color[0]
y = color[1]
z = color[2]
self._optix.set_float3("ambient_color", x, y, z, refresh)
self._logger.info("Ambient color updated.")
def get_param(self, name: str) -> Optional[Any]:
"""Get raytracer parameter.
Available parameters:
- ``compute_timeout``
- ``light_shading``
- ``max_accumulation_frames``
- ``min_accumulation_step``
- ``rt_timeout``
- ``save_albedo``
- ``save_normals``
Parameters
----------
name : string
Parameter name.
Returns
-------
out : Any, optional
Value of the parameter or ``None`` if parameter not found.
Examples
--------
>>> optix = TkOptiX()
>>> print(optix.get_param("max_accumulation_frames"))
See Also
--------
:meth:`plotoptix.NpOptiX.set_param`
"""
try:
v = None
self._padlock.acquire()
if name == "min_accumulation_step":
v = self._optix.get_min_accumulation_step()
elif name == "max_accumulation_frames":
v = self._optix.get_max_accumulation_frames()
elif name == "light_shading":
shading = self._optix.get_light_shading()
if shading >= 0: v = LightShading(shading)
elif name == "compute_timeout":
v = self._optix.get_compute_timeout()
elif name == "rt_timeout":
v = self._optix.get_rt_timeout()
elif name == "save_albedo":
v = self._optix.get_save_albedo()
elif name == "save_normals":
v = self._optix.get_save_normals()
else:
msg = "Unknown parameter " + name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
self._logger.info("Value of %s is %s", name, v)
return v
def set_param(self, **kwargs) -> None:
"""Set raytracer parameter(s).
Available parameters:
- ``compute_timeout``: timeout for the computation thread
Set this parameter if the computations performed in the scene_compute
callback are longer than the frame ray tracing. See also
:meth:`plotoptix.NpOptiX.set_scene_compute_cb`.
- ``light_shading``: light shading mode.
Use :attr:`plotoptix.enums.LightShading.Hard` for best caustics or
:attr:`plotoptix.enums.LightShading.Soft` for fast convergence. String
names ``"Hard"`` and ``"Soft"`` are accepted.
Set mode before adding lights.
- ``max_accumulation_frames``
Number of accumulation frames computed for the scene.
- ``min_accumulation_step``
Number of accumulation frames computed in a single step (before each
image refresh).
- ``rt_timeout``
Ray tracing timeout. Default value is 30000 (30s).
- ``save_albedo``
Allocate buffer and collect albedo information if set to `True`.
If set to `False` then buffer is allocated only if denoiser requires it.
- ``save_normals``
Allocate buffer and collect normals if set to `True`. If set to `False`
then buffer is allocated only if denoiser requires it.
Parameters
----------
kwargs : Any
Values of parameters corresponding to provided names.
Examples
--------
>>> optix = TkOptiX()
>>> optix.set_param(min_accumulation_step=4, max_accumulation_frames=200)
"""
try:
self._padlock.acquire()
for key, value in kwargs.items():
self._logger.info("Set %s to %s", key, value)
if key == "min_accumulation_step":
self._optix.set_min_accumulation_step(int(value))
elif key == "max_accumulation_frames":
self._optix.set_max_accumulation_frames(int(value))
elif key == "light_shading":
if len(self.light_handles) > 0:
msg = "Light shading has to be selected before adding lights."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
continue
if isinstance(value, str): mode = LightShading[value]
else: mode = value
self._optix.set_light_shading(mode.value)
elif key == "compute_timeout":
self._optix.set_compute_timeout(int(value))
elif key == "rt_timeout":
self._optix.set_rt_timeout(int(value))
elif key == "save_albedo":
self._optix.set_save_albedo(bool(value))
elif key == "save_normals":
self._optix.set_save_normals(bool(value))
else:
msg = "Unknown parameter " + key
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
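# Sketch of the ``light_shading`` parameter described above (illustrative only;
# ``rt`` is a hypothetical instance). The mode has to be set before any light
# is added:
#
#   >>> rt = TkOptiX()
#   >>> rt.set_param(light_shading="Soft")    # fast convergence
#   >>> rt.setup_light("light1", pos=[4, 5, 3], radius=1.5)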
def get_scene(self) -> dict:
"""Get dictionary with the scene description.
Returns a dictionary with the scene description. Geometry objects,
materials, lights, texture data or file names, cameras, postprocessing
and scene parameters are included. Callback functions and viewport dimensions
are not saved.
Returns
-------
out : dict, optional
Dictionary with the scene description.
"""
try:
self._padlock.acquire()
s = self._optix.save_scene_to_json()
if len(s) > 2: return json.loads(s)
else: return {}
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
def _init_scene_metadata(self) -> bool:
s = self._optix.get_scene_metadata()
if len(s) > 2: meta = json.loads(s)
else:
self._logger.error("Scene loading failed.")
return False
self.geometry_data = {} # geometry name to handle dictionary
self.geometry_names = {} # geometry handle to name dictionary
if "Geometry" in meta:
for key, value in meta["Geometry"].items():
self.geometry_data[key] = GeometryMeta(key, value["Handle"], value["Size"])
self.geometry_names[value["Handle"]] = key
else: return False
self.camera_handles = {} # camera name to handle dictionary
self.camera_names = {} # camera handle to name dictionary
if "Cameras" in meta:
for key, value in meta["Cameras"].items():
self.camera_handles[key] = value
self.camera_names[value] = key
else: return False
self.light_handles = {} # light name to handle dictionary
self.light_names = {} # light handle to name dictionary
if "Lights" in meta:
for key, value in meta["Lights"].items():
self.light_handles[key] = value
self.light_names[value] = key
else: return False
return True
def set_scene(self, scene: dict) -> None:
"""Setup scene using description in provided dictionary.
Set new scene using provided description (and destroy current scene). Geometry
objects, materials, lights, texture data or file names, cameras, postprocessing
and scene parameters are replaced. Callback functions and viewport dimensions are
preserved.
Note: locations of external resources loaded from files (e.g. textures) are saved
as relative paths, ensure your working directory matches these locations.
Parameters
----------
scene : dict
Dictionary with the scene description.
"""
s = json.dumps(scene)
with self._padlock:
self._logger.info("Loading new scene from dictionary.")
if self._optix.load_scene_from_json(s) and self._init_scene_metadata():
self.update_device_buffers()
self._logger.info("New scene ready.")
else:
msg = "Scene loading failed."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
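# Round-trip sketch for ``get_scene`` / ``set_scene`` (illustrative only;
# ``rt`` is a hypothetical instance and the dictionary content depends on the
# actual scene, so no keys are assumed here):
#
#   >>> scene = rt.get_scene()     # snapshot of the current scene as a dict
#   >>> # ... modify the dictionary or store it elsewhere ...
#   >>> rt.set_scene(scene)        # rebuild the scene from the dictionary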
def load_scene(self, file_name: str) -> None:
"""Load scene description from JSON file.
Load new scene from JSON file (and destroy current scene). Geometry objects,
materials, lights, texture data or file names, cameras, postprocessing and
scene parameters are replaced. Callback functions and viewport dimensions are
preserved.
Parameters
----------
file_name : str
Input file name.
"""
if not os.path.isfile(file_name):
msg = "File %s not found." % file_name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
wd = os.getcwd()
if os.path.isabs(file_name):
d, f = os.path.split(file_name)
os.chdir(d)
else:
f = file_name
with self._padlock:
self._logger.info("Loading new scene from file %s.", file_name)
if self._optix.load_scene_from_file(f) and self._init_scene_metadata():
self.update_device_buffers()
self._logger.info("New scene ready.")
else:
msg = "Scene loading failed."
self._logger.error(msg)
if self._raise_on_error:
os.chdir(wd)
raise ValueError(msg)
os.chdir(wd)
def save_scene(self, file_name: str) -> None:
"""Save scene description to JSON file.
Save description of the scene to file. Geometry objects, materials, lights,
texture data or file names, cameras, postprocessing and scene parameters
are included. Callback functions and viewport dimensions are not saved.
Existing files are overwritten.
Parameters
----------
file_name : str
Output file name.
"""
try:
self._padlock.acquire()
if not self._optix.save_scene_to_file(file_name):
msg = "Scene not saved."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
def save_image(self, file_name: str,
bps: Union[ChannelDepth, str] = ChannelDepth.Bps8) -> None:
"""Save current image to file.
Save current content of the image buffer to a file. Accepted formats,
recognized by the extension used in the ``file_name``, are:
- bmp, gif, png, jpg, and tif for 8bps color depth,
- png (Windows only), and tif for 16bps color depth,
- tif for 32bps hdr images.
Existing files are overwritten.
Parameters
----------
file_name : str
Output file name.
bps : ChannelDepth enum or string, optional
Color depth.
See Also
--------
:class:`plotoptix.enums.ChannelDepth`
"""
if isinstance(bps, str): bps = ChannelDepth[bps]
try:
self._padlock.acquire()
if bps == ChannelDepth.Bps8:
ok = self._optix.save_image_to_file(file_name)
elif bps == ChannelDepth.Bps16:
ok = self._optix.save_image_to_file_16bps(file_name)
elif bps == ChannelDepth.Bps32:
ok = self._optix.save_image_to_file_32bps(file_name)
else:
ok = False
if not ok:
msg = "Image not saved."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
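# Illustrative calls matching the formats listed above (file names are
# hypothetical):
#
#   >>> rt.save_image("frame.png")                             # 8 bps
#   >>> rt.save_image("frame_16.tif", bps="Bps16")             # 16 bps
#   >>> rt.save_image("frame_hdr.tif", bps=ChannelDepth.Bps32) # 32 bps hdr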
def encoder_create(self, fps: int, bitrate: float = 2,
idrrate: Optional[int] = None,
profile: Union[NvEncProfile, str] = NvEncProfile.Default,
preset: Union[NvEncPreset, str] = NvEncPreset.Default) -> None:
"""Create video encoder.
Create and configure video encoder for this raytracer instance. Only one encoder
per raytracer instance is supported now. Specifying ``preset`` overrides ``bitrate``
settings. Beware that some combinations are not supported by all players
(e.g. lossless encoding is not playable in Windows Media Player).
Parameters
----------
fps : int
Frames per second assumed in the output file.
bitrate : float, optional
Constant bitrate of the encoded stream, in Mbit/s (so you do not have to type all the zeros).
idrrate : int, optional
Instantaneous Decoder Refresh (IDR) frame interval. A 2-second interval is
used if ``idrrate`` is not provided.
profile : NvEncProfile enum or string, optional
H.264 encoding profile.
preset : NvEncPreset enum or string, optional
H.264 encoding preset, overrides ``bitrate`` settings.
See Also
--------
:class:`plotoptix.enums.NvEncProfile`, :class:`plotoptix.enums.NvEncPreset`
"""
if idrrate is None: idrrate = 2 * fps
if isinstance(profile, str): profile = NvEncProfile[profile]
if isinstance(preset, str): preset = NvEncPreset[preset]
try:
self._padlock.acquire()
if not self._optix.encoder_create(fps, int(1000000 * bitrate), idrrate, profile.value, preset.value):
msg = "Encoder not created."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
def encoder_start(self, out_name: str, n_frames: int = 0) -> None:
"""Start video encoding.
Start encoding to MP4 file with provided name. Total number of frames
can be optionally limited. Output file is overwritten if it already exists.
A new file is created and encoding is restarted if the method is launched
during a previously started encoding.
Parameters
----------
out_name : str
Output file name.
n_frames : int, optional
Maximum number of frames to encode if ``n_frames`` > 0, or unlimited
encoding when the default value is used.
"""
try:
self._padlock.acquire()
if not self._optix.encoder_start(out_name, n_frames):
msg = "Encoder not started."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
def encoder_stop(self) -> None:
"""Stop video encoding.
Stop encoding and close the output file (can be called before the configured
total number of frames is encoded).
"""
try:
self._padlock.acquire()
if not self._optix.encoder_stop():
msg = "Encoder not stopped."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
def encoder_is_open(self) -> bool:
"""Encoder is encoding.
Returns
-------
out : bool
``True`` if encoder is encoding.
"""
return self._optix.encoder_is_open()
def encoded_frames(self) -> int:
"""Number of encoded video frames.
Returns
-------
out : int
Number of frames.
"""
n = self._optix.encoded_frames()
if n < 0:
msg = "Number of encoded frames unavailable."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return n
def encoding_frames(self) -> int:
"""Number of frames to encode.
Returns
-------
out : int
Number of frames.
"""
n = self._optix.encoding_frames()
if n < 0:
msg = "Number of frames to encode unavailable."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return n
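# Minimal encoding workflow sketch using the methods above (fps, bitrate, file
# name and frame count are hypothetical; encoder_create() must precede
# encoder_start()):
#
#   >>> rt.encoder_create(fps=25, bitrate=4)
#   >>> rt.encoder_start("output.mp4", n_frames=250)       # ~10 s of video
#   >>> print(rt.encoder_is_open(), rt.encoded_frames())
#   >>> rt.encoder_stop()                                   # stop early, if needed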
def get_camera_names(self) -> list:
"""Return list of cameras' names.
"""
return list(self.camera_handles.keys())
def get_camera_name_handle(self, name: Optional[str] = None) -> (Optional[str], Optional[int]):
"""Get camera name and handle.
Mostly for the internal use.
Parameters
----------
name : string, optional
Camera name; current camera is used if name not provided.
Returns
-------
out : tuple (name, handle)
Name and handle of the camera or ``(None, None)`` if camera not found.
"""
cam_handle = 0
if name is None: # try current camera
cam_handle = self._optix.get_current_camera()
if cam_handle == 0:
msg = "Current camera is not set."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None, None
for n, h in self.camera_handles.items():
if h == cam_handle:
name = n
break
else: # try camera by name
if not isinstance(name, str): name = str(name)
if name in self.camera_handles:
cam_handle = self.camera_handles[name]
else:
msg = "Camera %s does not exists." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None, None
return name, cam_handle
def get_camera(self, name: Optional[str] = None) -> Optional[dict]:
"""Get camera parameters.
Parameters
----------
name : string, optional
Name of the camera, use current camera if name not provided.
Returns
-------
out : dict, optional
Dictionary of the camera parameters or ``None`` if failed on
accessing camera data.
"""
name, cam_handle = self.get_camera_name_handle(name)
if name is None: return None
s = self._optix.get_camera(cam_handle)
if len(s) > 2: return json.loads(s)
else:
msg = "Failed on reading camera %s." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None
def get_camera_eye(self, name: Optional[str] = None) -> Optional[np.ndarray]:
"""Get camera eye coordinates.
Parameters
----------
name : string, optional
Name of the camera, use current camera if name not provided.
Returns
-------
out : np.ndarray, optional
3D coordinates of the camera eye or None if failed on
accessing camera data.
"""
if name is not None and not isinstance(name, str): name = str(name)
name, cam_handle = self.get_camera_name_handle(name)
if name is None: return None
eye = np.ascontiguousarray([0, 0, 0], dtype=np.float32)
self._optix.get_camera_eye(cam_handle, eye.ctypes.data)
return eye
def get_camera_target(self, name: Optional[str] = None) -> Optional[np.ndarray]:
"""Get camera target coordinates.
Parameters
----------
name : string, optional
Name of the camera, use current camera if name not provided.
Returns
-------
out : np.ndarray, optional
3D coordinates of the camera target or ``None`` if failed on
accessing camera data.
"""
if name is not None and not isinstance(name, str): name = str(name)
name, cam_handle = self.get_camera_name_handle(name)
if name is None: return None
target = np.ascontiguousarray([0, 0, 0], dtype=np.float32)
self._optix.get_camera_target(cam_handle, target.ctypes.data)
return target
def get_camera_glock(self, name: Optional[str] = None) -> Optional[bool]:
"""Get camera gimbal lock state.
Parameters
----------
name : string, optional
Name of the camera, use current camera if name not provided.
Returns
-------
out : bool, optional
Gimbal lock state of the camera or ``None`` if failed on
accessing camera data.
"""
if name is not None and not isinstance(name, str): name = str(name)
name, cam_handle = self.get_camera_name_handle(name)
if name is None: return None
return self._optix.get_camera_glock(cam_handle)
def set_camera_glock(self, state: bool) -> None:
"""Set current camera's gimbal lock.
Parameters
----------
state : bool
Gimbal lock state.
"""
if not self._optix.set_camera_glock(state):
msg = "Camera gimbal lock not set."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def setup_camera(self, name: str,
eye: Optional[Any] = None,
target: Optional[Any] = None,
up: Optional[Any] = None,
cam_type: Union[Camera, str] = Camera.Pinhole,
aperture_radius: float = -1,
aperture_fract: float = 0.15,
focal_scale: float = -1,
chroma_l: float = 0.05,
chroma_t: float = 0.01,
fov: float = -1,
blur: float = 1,
glock: bool = False,
textures: Optional[Any] = None,
make_current: bool = True) -> None:
"""Setup new or update existing camera.
Note, parameters possible to update with this method are:
``eye``, ``target``, ``up``, ``aperture_radius``,
``focal_scale``, and ``fov``.
Parameters
----------
name : string
Name of the new camera.
eye : array_like, optional
Eye 3D position. Best fit for the current scene is computed if
argument is not provided. Ignored in camera modes with ray origins
stored in a texture.
target : array_like, optional
Target 3D position. Center of all geometries if argument not provided.
Ignored in camera modes with ray targets or directions stored in a texture.
up : array_like, optional
Up (vertical) direction. Y axis if argument not provided. Ignored in camera
modes with ray origins stored in a texture.
cam_type : Camera enum or string, optional
Type (pinhole, depth of field, ...), see :class:`plotoptix.enums.Camera`.
Cannot be changed after construction.
aperture_radius : float, optional
Aperture radius (increases focus blur for depth of field cameras).
aperture_fract : float, optional
Fraction of blind central spot of the aperture (results with ring-like
bokeh if > 0). Cannot be changed after construction.
focal_scale : float, optional
Focusing distance, relative to ``eye - target`` length.
chroma_l : float, optional
Longitudinal chromatic aberration strength, relative variation of the focusing
distance for different wavelengths. Should be a small positive value << 1.0. Default
is ``0.05``, use ``0.0`` for no aberration.
chroma_t : float, optional
Transverse chromatic aberration strength, relative variation of the lens
magnification for different wavelengths. Should be a small positive value << 1.0.
Default is ``0.01``, use ``0.0`` for no aberration.
fov : float, optional
Field of view in degrees.
blur : float, optional
Weight of the new frame in averaging with already accumulated frames.
Range is (0; 1>, lower values result with a higher motion blur, value
1.0 turns off the blur (default). Cannot be changed after construction.
glock : bool, optional
Gimbal lock state of the new camera.
textures : array_like, optional
List of textures used by the camera ray generation program.
make_current : bool, optional
Automatically switch to this camera if set to ``True``.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if isinstance(cam_type, str): cam_type = Camera[cam_type]
if name in self.camera_handles:
self.update_camera(name=name, eye=eye, target=target, up=up,
aperture_radius=aperture_radius,
focal_scale=focal_scale,
fov=fov)
return
if up is None: up = np.ascontiguousarray([0, 1, 0], dtype=np.float32)
if aperture_radius <= 0: aperture_radius = 0.1
if focal_scale <= 0: focal_scale = 1.0
if fov <= 0: fov = 35.0
eye_ptr = 0
eye = _make_contiguous_vector(eye, 3)
if eye is not None: eye_ptr = eye.ctypes.data
target_ptr = 0
target = _make_contiguous_vector(target, 3)
if target is not None: target_ptr = target.ctypes.data
up = _make_contiguous_vector(up, 3)
if up is None:
msg = "Need 3D camera up vector."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
tex_list = ""
if textures is not None: tex_list = ";".join(textures)
h = self._optix.setup_camera(name, cam_type.value,
eye_ptr, target_ptr, up.ctypes.data,
aperture_radius, aperture_fract,
focal_scale, chroma_l, chroma_t,
fov, blur, glock,
tex_list, make_current)
if h > 0:
self._logger.info("Camera %s handle: %d.", name, h)
self.camera_handles[name] = h
self.camera_names[h] = name
else:
msg = "Camera setup failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
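# Depth-of-field camera sketch (all values are hypothetical; the ``DoF`` name
# is assumed to be a member of plotoptix.enums.Camera):
#
#   >>> rt.setup_camera("cam1", cam_type="DoF",
#   ...                 eye=[0, 2, 10], target=[0, 0, 0],
#   ...                 aperture_radius=0.2, focal_scale=0.9, fov=35)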
def update_camera(self, name: Optional[str] = None,
eye: Optional[Any] = None,
target: Optional[Any] = None,
up: Optional[Any] = None,
aperture_radius: float = -1.0,
focal_scale: float = -1.0,
fov: float = -1.0) -> None:
"""Update camera parameters.
Parameters
----------
name : string
Name of the camera to update.
eye : array_like, optional
Eye 3D position.
target : array_like, optional
Target 3D position.
up : array_like, optional
Up (vertical) direction.
aperture_radius : float, optional
Aperture radius (increases focus blur for depth of field cameras).
focal_scale : float, optional
Focus distance / (eye - target).length.
fov : float, optional
Field of view in degrees.
"""
name, cam_handle = self.get_camera_name_handle(name)
if (name is None) or (cam_handle == 0): return
eye = _make_contiguous_vector(eye, 3)
if eye is not None: eye_ptr = eye.ctypes.data
else: eye_ptr = 0
target = _make_contiguous_vector(target, 3)
if target is not None: target_ptr = target.ctypes.data
else: target_ptr = 0
up = _make_contiguous_vector(up, 3)
if up is not None: up_ptr = up.ctypes.data
else: up_ptr = 0
if self._optix.update_camera(name, eye_ptr, target_ptr, up_ptr,
aperture_radius, focal_scale, fov):
self._logger.info("Camera %s updated.", name)
else:
msg = "Camera %s update failed." % name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def get_current_camera(self) -> Optional[str]:
"""Get current camera name.
Returns
-------
out : string, optional
Name of the current camera or ``None`` if camera not set.
"""
cam_handle = self._optix.get_current_camera()
if cam_handle == 0:
msg = "Current camera is not set."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None
if cam_handle not in self.camera_names:
msg = "Camera handle %d does not exists." % cam_handle
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None
return self.camera_names[cam_handle]
def set_current_camera(self, name: str) -> None:
"""Switch to another camera.
Parameters
----------
name : string
Name of the new current camera.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if name not in self.camera_handles:
msg = "Camera %s does not exists." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if self._optix.set_current_camera(name):
self._logger.info("Current camera: %s", name)
else:
msg = "Current camera not changed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
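# Switching between cameras, a sketch (camera names and positions are
# hypothetical):
#
#   >>> rt.setup_camera("overview", eye=[0, 10, 10])
#   >>> rt.setup_camera("closeup", eye=[0, 1, 2], make_current=False)
#   >>> print(rt.get_current_camera())     # -> overview
#   >>> rt.set_current_camera("closeup")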
def camera_fit(self,
camera: Optional[str] = None,
geometry: Optional[str] = None,
scale: float = 2.5) -> None:
"""Fit the camera eye and target to contain geometry in the field of view.
Parameters
----------
camera : string, optional
Name of the new camera to fit; current camera if name not provided.
geometry : string, optional
Name of the geometry to fit in view; all geometries if not provided.
scale : float, optional
Adjustment of the preferred distance (useful for wide angle cameras).
"""
camera, cam_handle = self.get_camera_name_handle(camera)
if camera is None: return
if geometry is not None:
if not isinstance(geometry, str): geometry = str(geometry)
else: geometry = ""
self._optix.fit_camera(cam_handle, geometry, scale)
def camera_move_by(self, shift: Tuple[float, float, float]) -> None:
"""Move current camera in the world coordinates.
Parameters
----------
shift : tuple (float, float, float)
(X, Y, Z) shift vector.
"""
if not self._optix.move_camera_by(shift[0], shift[1], shift[2]):
msg = "Camera move failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def camera_move_by_local(self, shift: Tuple[float, float, float]) -> None:
"""Move current camera in the camera coordinates.
Camera coordinates are: X to the right, Y up, Z towards camera.
Parameters
----------
shift : tuple (float, float, float)
(X, Y, Z) shift vector.
"""
if not self._optix.move_camera_by_local(shift[0], shift[1], shift[2]):
msg = "Camera move failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def camera_rotate_by(self,
rot: Tuple[float, float, float],
center: Tuple[float, float, float]) -> None:
"""Rotate current camera in the world coordinates about the center.
Rotation is done in the world coordinates about the Y, X, and then Z axis,
by the angles provided with ``rot = (rx, ry, rz)`` parameter.
Parameters
----------
rot : tuple (float, float, float)
Rotation around (X, Y, Z) axis.
center : tuple (float, float, float)
Rotation center.
"""
if not self._optix.rotate_camera_by(rot[0], rot[1], rot[2], center[0], center[1], center[2]):
msg = "Camera rotate failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def camera_rotate_by_local(self,
rot: Tuple[float, float, float],
center: Tuple[float, float, float]) -> None:
"""Rotate current camera in the camera coordinates about the center.
Rotation is done in the camera coordinates about the Y (camera up, yaw),
X (camera right, pitch), and then Z (towards camera, roll) axis,
by the angles provided with ``rot = (rx, ry, rz)`` parameter.
Parameters
----------
rot : tuple (float, float, float)
Rotation around (X, Y, Z) axis.
center : tuple (float, float, float)
Rotation center.
"""
if not self._optix.rotate_camera_by_local(rot[0], rot[1], rot[2], center[0], center[1], center[2]):
msg = "Camera rotate local failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def camera_rotate_eye(self, rot: Tuple[float, float, float]) -> None:
"""Rotate current camera eye about the target point in the world coordinates.
Rotation is done in the world coordinates about the Y, X, and then Z axis,
by the angles provided with ``rot = (rx, ry, rz)`` parameter.
Parameters
----------
rot : tuple (float, float, float)
Rotation around (X, Y, Z) axis.
"""
if not self._optix.rotate_camera_eye_by(rot[0], rot[1], rot[2]):
msg = "Camera rotate eye failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def camera_rotate_eye_local(self, rot: Tuple[float, float, float]) -> None:
"""Rotate current camera eye about the target point in the camera coordinates.
Rotation is done in the camera coordinates about the Y (camera up, yaw),
X (camera right, pitch), and then Z (towards camera, roll) axis,
by the angles provided with ``rot = (rx, ry, rz)`` parameter.
Parameters
----------
rot : tuple (float, float, float)
Rotation around (X, Y, Z) axis.
"""
if not self._optix.rotate_camera_eye_by_local(rot[0], rot[1], rot[2]):
msg = "Camera rotate eye local failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def camera_rotate_target(self, rot: Tuple[float, float, float]) -> None:
"""Rotate current camera target about the eye point in the world coordinates.
Rotation is done in the world coordinates about the Y, X, and then Z axis,
by the angles provided with ``rot = (rx, ry, rz)`` parameter.
Parameters
----------
rot : tuple (float, float, float)
Rotation around (X, Y, Z) axis.
"""
if not self._optix.rotate_camera_tgt_by(rot[0], rot[1], rot[2]):
msg = "Camera rotate target failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def camera_rotate_target_local(self, rot: Tuple[float, float, float]) -> None:
"""Rotate current camera target about the eye point in the camera coordinates.
Rotation is done in the camera coordinates about the Y (camera up, yaw),
X (camera right, pitch), and then Z (towards camera, roll) axis,
by the angles provided with ``rot = (rx, ry, rz)`` parameter.
Parameters
----------
rot : tuple (float, float, float)
Rotation around (X, Y, Z) axis.
"""
if not self._optix.rotate_camera_tgt_by_local(rot[0], rot[1], rot[2]):
msg = "Camera rotate target failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
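# Sketch: orbit the current camera eye about its target using the rotation
# methods above (angle units are not stated in the docstrings; radians are
# assumed here, 90 steps of 1 degree about the Y axis):
#
#   >>> import math
#   >>> for _ in range(90):
#   ...     rt.camera_rotate_eye([0, math.pi / 180, 0])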
def get_light_names(self) -> list:
"""Return list of lights' names.
"""
return list(self.light_handles.keys())
def get_light_shading(self) -> Optional[LightShading]:
"""Get light shading mode.
Deprecated, use ``get_param("light_shading")`` instead.
Returns
----------
out : LightShading or None
Light shading mode. ``None`` is returned if the function could
not read the mode from the raytracer.
See Also
--------
:meth:`plotoptix.NpOptiX.get_param`
"""
self._logger.warn("Deprecated, use get_param(\"light_shading\") instead.")
return self.get_param("light_shading")
def set_light_shading(self, mode: Union[LightShading, str]) -> None:
"""Set light shading mode.
Deprecated, use ``set_param(light_shading=mode)`` instead.
See Also
--------
:meth:`plotoptix.NpOptiX.set_param`
"""
self._logger.warn("Deprecated, use set_param(light_shading=mode) instead.")
self.set_param(light_shading=mode)
def get_light_pos(self, name: Optional[str] = None) -> Optional[np.ndarray]:
"""Get light 3D position.
Parameters
----------
name : string, optional
Name of the light (last added light if ``None``).
Returns
-------
out : np.ndarray, optional
3D position of the light or ``None`` if failed on accessing light data.
"""
if name is None:
if len(self.light_handles) > 0: name = list(self.light_handles.keys())[-1]
else: raise ValueError()
if not isinstance(name, str): name = str(name)
if name not in self.light_handles:
msg = "Light %s does not exists." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None
pos = np.ascontiguousarray([0, 0, 0], dtype=np.float32)
self._optix.get_light_pos(name, pos.ctypes.data)
return pos
def get_light_color(self, name: Optional[str] = None) -> Optional[np.ndarray]:
"""Get light color.
Parameters
----------
name : string, optional
Name of the light (last added light if ``None``).
Returns
-------
out : np.ndarray, optional
Light color RGB or ``None`` if failed on accessing light data.
"""
if name is None:
if len(self.light_handles) > 0: name = list(self.light_handles.keys())[-1]
else: raise ValueError()
if not isinstance(name, str): name = str(name)
if name not in self.light_handles:
msg = "Light %s does not exists." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None
col = np.ascontiguousarray([0, 0, 0], dtype=np.float32)
self._optix.get_light_color(name, col.ctypes.data)
return col
def get_light_u(self, name: Optional[str] = None) -> Optional[np.ndarray]:
"""Get parallelogram light U vector.
Parameters
----------
name : string, optional
Name of the light (last added light if ``None``).
Returns
-------
out : np.ndarray, optional
Light U vector or ``None`` if failed on accessing light data.
"""
if name is None:
if len(self.light_handles) > 0: name = list(self.light_handles.keys())[-1]
else: raise ValueError()
if not isinstance(name, str): name = str(name)
if name not in self.light_handles:
msg = "Light %s does not exists." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None
u = np.ascontiguousarray([0, 0, 0], dtype=np.float32)
self._optix.get_light_u(name, u.ctypes.data)
return u
def get_light_v(self, name: Optional[str] = None) -> Optional[np.ndarray]:
"""Get parallelogram light V vector.
Parameters
----------
name : string, optional
Name of the light (last added light if ``None``).
Returns
-------
out : np.ndarray, optional
Light V vector or ``None`` if failed on accessing light data.
"""
if name is None:
if len(self.light_handles) > 0: name = list(self.light_handles.keys())[-1]
else: raise ValueError()
if not isinstance(name, str): name = str(name)
if name not in self.light_handles:
msg = "Light %s does not exists." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None
v = np.ascontiguousarray([0, 0, 0], dtype=np.float32)
self._optix.get_light_v(name, v.ctypes.data)
return v
def get_light_r(self, name: Optional[str] = None) -> Optional[float]:
"""Get spherical light radius.
Parameters
----------
name : string, optional
Name of the light (last added light if ``None``).
Returns
-------
out : float, optional
Light radius or ``None`` if failed on accessing light data.
"""
if name is None:
if len(self.light_handles) > 0: name = list(self.light_handles.keys())[-1]
else: raise ValueError()
if not isinstance(name, str): name = str(name)
if name not in self.light_handles:
msg = "Light %s does not exists." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None
return self._optix.get_light_r(name)
def get_light(self, name: str) -> Optional[dict]:
"""Get light source parameters.
Parameters
----------
name : string
Name of the light source.
Returns
-------
out : dict, optional
Dictionary of the light source parameters or ``None`` if
failed on accessing the data.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if name not in self.light_handles:
msg = "Light %s does not exists." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return None
s = self._optix.get_light(name)
if len(s) > 2: return json.loads(s)
else:
msg = "Failed on reading light %s." % name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
return None
def setup_spherical_light(self, name: str, pos: Optional[Any] = None,
autofit_camera: Optional[str] = None,
color: Optional[Any] = None,
radius: float = -1,
in_geometry: bool = True) -> None:
"""Setup new or update existing spherical light.
Updating an existing light with this method will not change its visibility.
Only ``pos``, ``color``, and ``radius`` values can be updated.
Parameters
----------
name : string
Name of the new light.
pos : array_like, optional
3D position.
autofit_camera : string, optional
Name of the camera used to compute light position automatically.
color : Any, optional
RGB color of the light; single value is gray, array_like is RGB
color components. Color value range is (0; inf) as it means the
light intensity.
radius : float, optional
Sphere radius.
in_geometry: bool, optional
Visible in the scene if set to ``True``.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if name in self.light_handles:
self.update_light(name, pos=pos, color=color, radius=radius)
return
if color is None: color = 10 * np.ascontiguousarray([1, 1, 1], dtype=np.float32)
if radius <= 0: radius = 1.0
autofit = False
pos = _make_contiguous_vector(pos, 3)
if pos is None:
cam_name, _ = self.get_camera_name_handle(autofit_camera)
if cam_name is None:
msg = "Need 3D coordinates for the new light."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
pos = np.ascontiguousarray([0, 0, 0])
autofit = True
color = _make_contiguous_vector(color, 3)
if color is None:
msg = "Need color (single value or 3-element array/list/tuple)."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
h = self._optix.setup_spherical_light(name, pos.ctypes.data, color.ctypes.data,
radius, in_geometry)
if h != 0:
self._logger.info("Light %s handle: %d.", name, h)
self.light_handles[name] = h
self.light_names[h] = name
if autofit:
self.light_fit(name, camera=cam_name)
else:
msg = "Light %s setup failed." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
def setup_parallelogram_light(self, name: str, pos: Optional[Any] = None,
autofit_camera: Optional[str] = None,
color: Optional[Any] = None,
u: Optional[Any] = None,
v: Optional[Any] = None,
in_geometry: bool = True) -> None:
"""Setup new or update existing parallelogram light.
Note, the light direction is UxV, the back side is black.
Properties that can be updated: ``pos``, ``color``, ``u``, ``v``.
Parameters
----------
name : string
Name of the new light.
pos : array_like, optional
3D position.
autofit_camera : string, optional
Name of the camera used to compute light position automatically.
color : Any, optional
RGB color of the light; single value is gray, array_like is RGB
color components. Color value range is (0; inf) as it means the
light intensity.
u : array_like, optional
Parallelogram U vector.
v : array_like, optional
Parallelogram V vector.
in_geometry: bool, optional
Visible in the scene if set to ``True``.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if name in self.light_handles:
self.update_light(name, pos=pos, color=color, u=u, v=v)
return
if color is None: color = 10 * np.ascontiguousarray([1, 1, 1], dtype=np.float32)
if u is None: u = np.ascontiguousarray([0, 1, 0], dtype=np.float32)
if v is None: v = np.ascontiguousarray([-1, 0, 0], dtype=np.float32)
autofit = False
pos = _make_contiguous_vector(pos, 3)
if pos is None:
cam_name, _ = self.get_camera_name_handle(autofit_camera)
if cam_name is None:
msg = "Need 3D coordinates for the new light."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
pos = np.ascontiguousarray([0, 0, 0])
autofit = True
color = _make_contiguous_vector(color, 3)
if color is None:
msg = "Need color (single value or 3-element array/list/tuple)."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
u = _make_contiguous_vector(u, 3)
if u is None:
msg = "Need 3D vector U."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
v = _make_contiguous_vector(v, 3)
if v is None:
msg = "Need 3D vector V."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
h = self._optix.setup_parallelogram_light(name, pos.ctypes.data, color.ctypes.data,
u.ctypes.data, v.ctypes.data, in_geometry)
if h != 0:
self._logger.info("Light %s handle: %d.", name, h)
self.light_handles[name] = h
self.light_names[h] = name
if autofit:
self.light_fit(name, camera=cam_name)
else:
msg = "Light %s setup failed." % name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def setup_area_light(self, name: str,
center: Optional[Any] = None, target: Optional[Any] = None,
u: Optional[float] = None, v: Optional[float] = None,
color: Optional[Any] = None,
in_geometry: bool = True) -> None:
"""Setup new or update existing area (parallelogram) light.
Convenience method to setup parallelogram light with ``center`` and ``target`` 3D points,
and scalar lengths of sides ``u`` and ``v``.
Parameters
----------
name : string
Name of the new light.
center : array_like
3D position of the light center.
target : array_like
3D position of the light target.
u : float
Horizontal side length.
v : float
Vertical side length.
color : Any, optional
RGB color of the light; single value is gray, array_like is RGB
color components. Color value range is (0; inf) as it means the
light intensity.
in_geometry: bool, optional
Visible in the scene if set to ``True``.
"""
if name in self.light_handles:
self.update_area_light(name, center, target, u, v, color)
return
if center is None or target is None or u is None or v is None:
msg = "Need ceter, target, u, and v for the new light."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if color is None:
color = 10 * np.ascontiguousarray([1, 1, 1], dtype=np.float32)
center = _make_contiguous_vector(center, 3)
target = _make_contiguous_vector(target, 3)
n = target - center
n = n / np.linalg.norm(n)
uvec = np.cross(n, [0, 1, 0])
uvec = uvec / np.linalg.norm(uvec)
vvec = np.cross(uvec, n)
vvec = vvec / np.linalg.norm(vvec)
uvec *= -u
vvec *= v
pos = center - 0.5 * (vvec + uvec)
self.setup_parallelogram_light(name, pos=pos, color=color, u=uvec, v=vvec, in_geometry=in_geometry)
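# Light setup sketches for the methods above (positions, sizes and intensities
# are hypothetical):
#
#   >>> rt.setup_spherical_light("sun", pos=[10, 10, 10], color=[12, 11, 10], radius=2)
#   >>> rt.setup_area_light("panel", center=[0, 5, 0], target=[0, 0, 0],
#   ...                     u=3, v=2, color=8)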
def setup_light(self, name: str,
light_type: Union[Light, str] = Light.Spherical,
pos: Optional[Any] = None,
autofit_camera: Optional[str] = None,
color: Optional[Any] = None,
u: Optional[Any] = None,
v: Optional[Any] = None,
radius: float = -1,
in_geometry: bool = True) -> None:
"""Setup a new light or update an existing light.
Note, the parallelogram light direction is UxV, the back side is black.
Updating an existing light with this method will not change the type of light,
nor its visibility. Only ``pos``, ``color``, ``radius``, ``u``, and ``v`` values
can be updated.
Parameters
----------
name : string
Name of the new light.
light_type : Light enum or string
Light type (parallelogram, spherical, ...), see :class:`plotoptix.enums.Light` enum.
pos : array_like, optional
3D position.
autofit_camera : string, optional
Name of the camera used to compute light position automatically.
color : Any, optional
RGB color of the light; single value is gray, array_like is RGB
color components. Color value range is (0; inf) as it means the
light intensity.
u : array_like, optional
Parallelogram U vector.
v : array_like, optional
Parallelogram V vector.
radius : float, optional
Sphere radius.
in_geometry: bool, optional
Visible in the scene if set to ``True``.
"""
if name is None: raise ValueError()
if name in self.light_handles:
self.update_light(name, pos=pos, color=color,
radius=radius, u=u, v=v)
return
if color is None: color = 10 * np.ascontiguousarray([1, 1, 1], dtype=np.float32)
if u is None: u = np.ascontiguousarray([0, 1, 0], dtype=np.float32)
if v is None: v = np.ascontiguousarray([-1, 0, 0], dtype=np.float32)
if radius <= 0: radius = 1.0
if isinstance(light_type, str): light_type = Light[light_type]
if light_type == Light.Spherical:
self.setup_spherical_light(name, pos=pos,
autofit_camera=autofit_camera,
color=color, radius=radius,
in_geometry=in_geometry)
elif light_type == Light.Parallelogram:
self.setup_parallelogram_light(name, pos=pos,
autofit_camera=autofit_camera,
color=color, u=u, v=v,
in_geometry=in_geometry)
def update_light(self, name: str,
pos: Optional[Any] = None,
color: Optional[Any] = None,
radius: float = -1,
u: Optional[Any] = None,
v: Optional[Any] = None) -> None:
"""Update light parameters.
Note, the parallelogram light direction is UxV, the back side is black.
Parameters
----------
name : string
Name of the light.
pos : array_like, optional
3D position.
color : Any, optional
RGB color of the light; single value is gray, array_like is RGB
color components. Color value range is (0; inf) as it means the
light intensity.
radius : float, optional
Sphere radius.
u : array_like, optional
Parallelogram U vector.
v : array_like, optional
Parallelogram V vector.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if name not in self.light_handles:
msg = "Light %s does not exists." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
pos = _make_contiguous_vector(pos, 3)
if pos is not None: pos_ptr = pos.ctypes.data
else: pos_ptr = 0
color = _make_contiguous_vector(color, 3)
if color is not None: color_ptr = color.ctypes.data
else: color_ptr = 0
u = _make_contiguous_vector(u, 3)
if u is not None: u_ptr = u.ctypes.data
else: u_ptr = 0
v = _make_contiguous_vector(v, 3)
if v is not None: v_ptr = v.ctypes.data
else: v_ptr = 0
if self._optix.update_light(name,
pos_ptr, color_ptr,
radius, u_ptr, v_ptr):
self._logger.info("Light %s updated.", name)
else:
msg = "Light %s update failed." % name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def update_area_light(self, name: str,
center: Optional[Any] = None, target: Optional[Any] = None,
u: Optional[float] = None, v: Optional[float] = None,
color: Optional[Any] = None) -> None:
"""Update area (parallelogram) light.
Convenience method to update parallelogram light with ``center`` and ``target`` 3D points,
and scalar lengths of sides ``u`` and ``v``.
Parameters
----------
name : string
Name of the new light.
center : array_like, optional
3D position of the light center.
target : array_like, optional
3D position of the light target.
u : float, optional
Horizontal side length.
v : float, optional
Vertical side length.
color : Any, optional
RGB color of the light; single value is gray, array_like is RGB
color components. Color value range is (0; inf) as it means the
light intensity.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if name not in self.light_handles:
msg = "Light %s does not exists." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if u is None:
u = np.linalg.norm(self.get_light_u(name))
if v is None:
v = np.linalg.norm(self.get_light_v(name))
if center is None:
center = self.get_light_pos(name) + 0.5 * (self.get_light_u(name) + self.get_light_v(name))
if target is None:
n = np.cross(self.get_light_u(name), self.get_light_v(name))
target = center + 100 * n
center = _make_contiguous_vector(center, 3)
target = _make_contiguous_vector(target, 3)
n = target - center
n = n / np.linalg.norm(n)
uvec = np.cross(n, [0, 1, 0])
uvec = uvec / np.linalg.norm(uvec)
vvec = np.cross(uvec, n)
vvec = vvec / np.linalg.norm(vvec)
uvec *= -u
vvec *= v
pos = center - 0.5 * (vvec + uvec)
self.update_light(name, pos=pos, color=color, u=uvec, v=vvec)
def light_fit(self, light: str,
camera: Optional[str] = None,
horizontal_rot: float = 45,
vertical_rot: float = 25,
dist_scale: float = 1.5) -> None:
"""Fit light position and direction to the camera.
Parameters
----------
light : string
Name of the light.
camera : string, optional
Name of the camera; current camera is used if not provided.
horizontal_rot : float, optional
Angle: eye - target - light in the camera horizontal plane.
vertical_rot : float, optional
Angle: eye - target - light in the camera vertical plane.
dist_scale : float, optional
Light to target distance with respect to the eye to target distance.
"""
if light is None: raise ValueError()
if not isinstance(light, str): light = str(light)
if not light in self.light_handles:
msg = "Light %s not found." % light
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
cam_handle = 0
if camera is not None:
if not isinstance(camera, str): camera = str(camera)
if camera in self.camera_handles:
cam_handle = self.camera_handles[camera]
horizontal_rot = math.pi * horizontal_rot / 180.0
vertical_rot = math.pi * vertical_rot / 180.0
self._optix.fit_light(light, cam_handle, horizontal_rot, vertical_rot, dist_scale)
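# Sketch: place a light relative to the current camera with light_fit()
# (angles are in degrees per the docstring above; values are hypothetical):
#
#   >>> rt.setup_spherical_light("key", pos=[0, 0, 0], radius=1)
#   >>> rt.light_fit("key", horizontal_rot=60, vertical_rot=30, dist_scale=2.0)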
def get_material(self, name: str) -> Optional[dict]:
"""Get material parameters.
Parameters
----------
name : string
Name of the material.
Returns
-------
out : dict, optional
Dictionary of the material parameters or ``None`` if failed on
accessing material data.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
s = self._optix.get_material(name)
if len(s) > 2: return json.loads(s)
else:
msg = "Failed on reading material %s." % name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
return None
def setup_material(self, name: str, data: dict) -> None:
"""Setup new or update existing material.
Note: for maximum performance, setup only those materials
you need in the scene.
Parameters
----------
name : string
Name of the material.
data : dict
Parameters of the material.
See Also
--------
:py:mod:`plotoptix.materials`
"""
if name is None or data is None: raise ValueError()
if self._optix.setup_material(name, json.dumps(data)):
self._logger.info("Configured material %s.", name)
else:
msg = "Material %s not configured." % name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
def update_material(self, name: str, data: dict, refresh: bool = False) -> None:
"""Update material properties.
Update material properties and optionally refresh the scene.
Parameters
----------
name : string
Name of the material.
data : dict
Parameters of the material.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
See Also
--------
:py:mod:`plotoptix.materials`
"""
self.setup_material(name, data)
if refresh: self._optix.refresh_scene()
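# Material setup sketch. The predefined dictionary is assumed to come from the
# plotoptix.materials module referenced above; ``m_clear_glass`` is used as an
# example name, verify it is available in your plotoptix version:
#
#   >>> from plotoptix.materials import m_clear_glass
#   >>> rt.setup_material("glass", m_clear_glass)
#   >>> rt.set_data("balls", pos=xyz, r=0.3, mat="glass")   # xyz: (n, 3) array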
def update_material_texture(self, name: str, data: Any, idx: int = 0, keep_on_host: bool = False, refresh: bool = False) -> None:
"""Update material texture data.
Update texture content/size for material ``name``. Texture format has to be RGBA,
width/height are deduced from the ``data`` array shape. Use ``keep_on_host=True``
to make a copy of data in the host memory (in addition to GPU memory), this
option is required when (small) textures are going to be saved to JSON description
of the scene.
Parameters
----------
name : string
Material name.
data : array_like
Texture data.
idx : int, optional
Texture index; the first texture is used if the default value is kept.
keep_on_host : bool, optional
Store texture data copy in the host memory.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
"""
if not isinstance(name, str): name = str(name)
if not isinstance(data, np.ndarray): data = np.ascontiguousarray(data, dtype=np.float32)
if len(data.shape) != 3 or data.shape[-1] != 4:
msg = "Material texture shape should be (height,width,4)."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if data.dtype != np.float32: data = np.ascontiguousarray(data, dtype=np.float32)
if not data.flags['C_CONTIGUOUS']: data = np.ascontiguousarray(data, dtype=np.float32)
self._logger.info("Set material %s texture %d: %d x %d.", name, idx, data.shape[1], data.shape[0])
if not self._optix.set_material_texture(name, idx, data.ctypes.data, data.shape[1], data.shape[0], RtFormat.Float4.value, keep_on_host, refresh):
msg = "Material %s texture not uploaded." % name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
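# Texture update sketch: an RGBA array of shape (height, width, 4), as required
# above (the material name and the noise texture are hypothetical):
#
#   >>> tex = np.zeros((256, 256, 4), dtype=np.float32)
#   >>> tex[..., :3] = np.random.rand(256, 256, 1)    # gray noise in RGB
#   >>> tex[..., 3] = 1.0                              # opaque alpha
#   >>> rt.update_material_texture("textured", tex, refresh=True)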
def set_correction_curve(self, ctrl_points: Any,
channel: Union[Channel, str] = Channel.Gray,
n_points: int = 256,
range: float = 255,
refresh: bool = False) -> None:
"""Set correction curve.
Calculate and setup a color correction curve using control points provided with
``ctrl_points``. Curve is applied in 2D postprocessing stage to the selected
``channel``. Control points should be an array_like set of input-output values
(array shape is ``(m,2)``). Control point input and output maximum value can be
provided with the ``range`` parameter. Control points are scaled to the range
<0;1>, extreme values (0,0) and (1,1) are added if not present in ``ctrl_points``
(use :meth:`plotoptix.NpOptiX.set_texture_1d` if custom correction curve should
e.g. start above 0 or saturate at a level lower than 1).
Smooth bezier curve is calculated from the control points and stored in 1D texture
with ``n_points`` length.
Parameters
----------
ctrl_points : array_like
Control points to construct curve.
channel : Channel or string, optional
Destination color for the correction curve.
n_points : int, optional
Number of curve points to be stored in texture.
range : float, optional
Maximum input / output value corresponding to provided ``ctrl_points``.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
See Also
--------
:py:mod:`plotoptix.enums.Postprocessing`
:py:mod:`plotoptix.enums.Channel`
"""
if isinstance(channel, str): channel = Channel[channel]
if not isinstance(ctrl_points, np.ndarray): ctrl_points = np.ascontiguousarray(ctrl_points, dtype=np.float32)
if len(ctrl_points.shape) != 2 or ctrl_points.shape[1] != 2:
msg = "Control points shape should be (n,2)."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if ctrl_points.dtype != np.float32: ctrl_points = np.ascontiguousarray(ctrl_points, dtype=np.float32)
if not ctrl_points.flags['C_CONTIGUOUS']: ctrl_points = np.ascontiguousarray(ctrl_points, dtype=np.float32)
self._logger.info("Set correction curve in %s channel.", channel.name)
if not self._optix.set_correction_curve(ctrl_points.ctypes.data, ctrl_points.shape[0], n_points, channel.value, range, refresh):
msg = "Correction curve setup failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
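# Correction curve sketch: a gentle S-curve applied to the gray channel,
# control points given as (input, output) pairs in the default 0-255 range
# (values are hypothetical):
#
#   >>> rt.set_correction_curve([[64, 48], [128, 128], [192, 208]],
#   ...                         channel="Gray", refresh=True)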
def add_postproc(self, stage: Union[Postprocessing, str], refresh: bool = False) -> None:
"""Add 2D postprocessing stage.
Stages are applied to image in the order they are added with this
method. Each stage algorithm has its own variables that should be
configured before adding the postprocessing stage. Configuration
can be updated at any time, but stages cannot be disabled after
adding. See :py:mod:`plotoptix.enums.Postprocessing` for algorithms
configuration examples.
Parameters
----------
stage : Postprocessing or string
Postprocessing algorithm to add.
refresh : bool, optional
Set to ``True`` if the image should be re-computed.
See Also
--------
:py:mod:`plotoptix.enums.Postprocessing`
"""
if isinstance(stage, str): stage = Postprocessing[stage]
self._logger.info("Add postprocessing stage: %s.", stage.name)
if not self._optix.add_postproc(stage.value, refresh):
msg = "Configuration of postprocessing stage %s failed." % stage.name
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
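# Postprocessing sketch: add a gamma correction stage. The variable names
# "tonemap_exposure" and "tonemap_gamma" are assumed from the Postprocessing
# enum documentation referenced above, verify them for your version:
#
#   >>> rt.set_float("tonemap_exposure", 0.8)
#   >>> rt.set_float("tonemap_gamma", 2.2)
#   >>> rt.add_postproc("Gamma", refresh=True)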
def set_data(self, name: str, pos: Optional[Any] = None,
r: Optional[Any] = None, c: Optional[Any] = None,
u: Optional[Any] = None, v: Optional[Any] = None, w: Optional[Any] = None,
geom: Union[Geometry, str] = Geometry.ParticleSet,
geom_attr: Union[GeomAttributeProgram, str] = GeomAttributeProgram.Default,
mat: Optional[str] = None,
rnd: bool = True) -> None:
"""Create new or update existing geometry for the dataset.
Data is provided as an array of 3D positions of data points, with the shape ``(n, 3)``.
Additional features can be visualized as a color and size/thickness of the primitives.
Note: not all arguments are used to update existing geometry. Update is available for:
``mat``, ``pos``, ``c``, ``r``, ``u``, ``v``, and ``w`` data.
Parameters
----------
name : string
Name of the geometry.
pos : array_like, optional
Positions of data points.
c : Any, optional
Colors of the primitives. Single value means a constant gray level.
3-component array means constant RGB color. Array with the shape[0]
equal to the number of primitives will set individual gray/color for
each primitive.
r : Any, optional
Radii of particles / bezier primitives or U / V / W lengths of
parallelograms / parallelepipeds / tetrahedrons (if u / v / w not provided).
Single value sets const. size for all primitives.
u : array_like, optional
U vector(s) of parallelograms / parallelepipeds / tetrahedrons / textured particles.
Single vector sets const. value for all primitives.
v : array_like, optional
V vector(s) of parallelograms / parallelepipeds / tetrahedrons / textured particles.
Single vector sets const. value for all primitives.
w : array_like, optional
W vector(s) of parallelepipeds / tetrahedrons. Single vector sets const.
value for all primitives.
geom : Geometry enum or string, optional
Geometry of primitives (ParticleSet, Tetrahedrons, ...). See :class:`plotoptix.enums.Geometry`
enum.
geom_attr : GeomAttributeProgram enum or string, optional
Geometry attributes program. See :class:`plotoptix.enums.GeomAttributeProgram` enum.
mat : string, optional
Material name.
rnd : bool, optional
Randomize the U / V / W vectors that are not provided, so that regular but
randomly rotated primitives are generated from the available vectors (default).
If set to ``False`` all primitives are aligned in the same direction.
See Also
--------
:meth:`plotoptix.NpOptiX.update_data`
:class:`plotoptix.enums.Geometry`
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if isinstance(geom, str): geom = Geometry[geom]
if isinstance(geom_attr, str): geom_attr = GeomAttributeProgram[geom_attr]
if name in self.geometry_data:
self.update_data(name, mat=mat, pos=pos, c=c, r=r, u=u, v=v, w=w)
return
if pos is None:
msg = "pos argument required for new geometries."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if r is None: r = np.ascontiguousarray([0.05], dtype=np.float32)
if c is None: c = np.ascontiguousarray([0.94, 0.94, 0.94], dtype=np.float32)
if mat is None: mat = "diffuse"
n_primitives = -1
# Prepare positions data
pos = _make_contiguous_3d(pos)
if pos is None:
msg = "Positions (pos) are required for the new instances and cannot be left as None."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if (len(pos.shape) != 2) or (pos.shape[0] < 1) or (pos.shape[1] != 3):
msg = "Positions (pos) should be an array of shape (n, 3)."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
n_primitives = pos.shape[0]
pos_ptr = pos.ctypes.data
# Prepare colors data
c = np.ascontiguousarray(c, dtype=np.float32)
if c.shape == (1,):
c = np.ascontiguousarray([c[0], c[0], c[0]], dtype=np.float32)
col_const_ptr = c.ctypes.data
col_ptr = 0
elif c.shape == (3,):
col_const_ptr = c.ctypes.data
col_ptr = 0
else:
c = _make_contiguous_3d(c, n=n_primitives, extend_scalars=True)
assert c.shape == pos.shape, "Colors and data points shapes must be the same."
if c is not None: col_ptr = c.ctypes.data
else: col_ptr = 0
col_const_ptr = 0
# Prepare radii data
if r is not None:
if not isinstance(r, np.ndarray): r = np.ascontiguousarray(r, dtype=np.float32)
if r.dtype != np.float32: r = np.ascontiguousarray(r, dtype=np.float32)
if len(r.shape) > 1: r = r.flatten()
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
if r is not None:
if r.shape[0] == 1:
if n_primitives > 0: r = np.full(n_primitives, r[0], dtype=np.float32)
else:
msg = "Cannot resolve proper radii (r) shape from preceding data arguments."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
if (n_primitives > 0) and (n_primitives != r.shape[0]):
msg = "Radii (r) shape does not match shape of preceding data arguments."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
n_primitives = r.shape[0]
radii_ptr = r.ctypes.data
else: radii_ptr = 0
# Prepare U vectors
u = _make_contiguous_3d(u, n=n_primitives)
u_ptr = 0
if u is not None:
u_ptr = u.ctypes.data
# Prepare V vectors
v = _make_contiguous_3d(v, n=n_primitives)
v_ptr = 0
if v is not None:
v_ptr = v.ctypes.data
# Prepare W vectors
w = _make_contiguous_3d(w, n=n_primitives)
w_ptr = 0
if w is not None:
w_ptr = w.ctypes.data
if n_primitives == -1:
msg = "Could not figure out proper data shapes."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
# Configure according to selected geometry
is_ok = True
if geom == Geometry.ParticleSet:
if c is None:
msg = "ParticleSet setup failed, colors data is missing."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
if r is None:
msg = "ParticleSet setup failed, radii data is missing."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
elif geom == Geometry.ParticleSetTextured:
if r is None:
msg = "ParticleSetTextured setup failed, radii data is missing."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
if (u is None) or (v is None):
if r is None:
msg = "ParticleSetTextured setup failed, need U / V vectors or radii data."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
elif geom == Geometry.Parallelograms:
if c is None:
msg = "Parallelograms setup failed, colors data is missing."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
if (u is None) or (v is None):
if r is None:
msg = "Parallelograms setup failed, need U / V vectors or radii data."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
elif (geom == Geometry.Parallelepipeds) or (geom == Geometry.Tetrahedrons):
if c is None:
msg = "Plot setup failed, colors data is missing."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
if (u is None) or (v is None) or (w is None):
if r is None:
msg = "Plot setup failed, need U, V, W vectors or radii data."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
elif geom == Geometry.BezierChain:
if c is None:
msg = "BezierChain setup failed, colors data is missing."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
if r is None:
msg = "BezierChain setup failed, radii data is missing."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
elif geom == Geometry.SegmentChain:
if n_primitives < 2:
msg = "SegmentChain requires at least 2 data points."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
if c is None:
msg = "SegmentChain setup failed, colors data is missing."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
if r is None:
msg = "SegmentChain setup failed, radii data is missing."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
elif geom == Geometry.BSplineQuad:
if n_primitives < 3:
msg = "BSplineQuad requires at least 3 data points."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
if c is None:
msg = "BSplineQuad setup failed, colors data is missing."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
if r is None:
msg = "BSplineQuad setup failed, radii data is missing."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
elif geom == Geometry.BSplineCubic:
if n_primitives < 4:
msg = "BSplineCubic requires at least 4 data points."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
if c is None:
msg = "BSplineCubic setup failed, colors data is missing."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
if r is None:
msg = "BSplineCubic setup failed, radii data is missing."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
else:
msg = "Unknown geometry"
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
is_ok = False
if is_ok:
try:
self._padlock.acquire()
self._logger.info("Create %s %s, %d primitives...", geom.name, name, n_primitives)
g_handle = self._optix.setup_geometry(geom.value, geom_attr.value, name, mat, rnd, n_primitives,
pos_ptr, col_const_ptr, col_ptr, radii_ptr, u_ptr, v_ptr, w_ptr)
if g_handle > 0:
self._logger.info("...done, handle: %d", g_handle)
self.geometry_data[name] = GeometryMeta(name, g_handle, n_primitives)
self.geometry_names[g_handle] = name
else:
msg = "Geometry setup failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
def update_data(self, name: str,
mat: Optional[str] = None,
pos: Optional[Any] = None, c: Optional[Any] = None, r: Optional[Any] = None,
u: Optional[Any] = None, v: Optional[Any] = None, w: Optional[Any] = None) -> None:
"""Update data of an existing geometry.
Note that if the data size changes (``pos`` array size differs from the one provided with :meth:`plotoptix.NpOptiX.set_data`),
the other properties must also be provided with sizes matching the new data, otherwise default values are used.
Parameters
----------
name : string
Name of the geometry.
mat : string, optional
Material name.
pos : array_like, optional
Positions of data points.
c : Any, optional
Colors of the primitives. Single value means a constant gray level.
3-component array means constant RGB color. Array with the shape[0]
equal to the number of primitives will set individual gray/color for
each primitive.
r : Any, optional
Radii of particles / bezier primitives. Single value sets constant
radius for all primitives.
u : array_like, optional
U vector(s) of parallelograms / parallelepipeds / tetrahedrons / textured particles.
Single vector sets const. value for all primitives.
v : array_like, optional
V vector(s) of parallelograms / parallelepipeds / tetrahedrons / textured particles.
Single vector sets const. value for all primitives.
w : array_like, optional
W vector(s) of parallelepipeds / tetrahedrons. Single vector sets const.
value for all primitives.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if mat is None: mat = ""
if not name in self.geometry_data:
msg = "Geometry %s does not exists yet, use set_data() instead." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
n_primitives = self.geometry_data[name]._size
size_changed = False
# Prepare positions data
pos = _make_contiguous_3d(pos)
pos_ptr = 0
if pos is not None:
if (len(pos.shape) != 2) or (pos.shape[0] < 1) or (pos.shape[1] != 3):
msg = "Positions (pos) should be an array of shape (n, 3)."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
n_primitives = pos.shape[0]
size_changed = (n_primitives != self.geometry_data[name]._size)
pos_ptr = pos.ctypes.data
# Prepare colors data
col_const_ptr = 0
col_ptr = 0
if size_changed and c is None:
c = np.ascontiguousarray([0.94, 0.94, 0.94], dtype=np.float32)
elif c is not None:
c = np.ascontiguousarray(c, dtype=np.float32)
if c is not None:
if c.shape == (1,):
c = np.ascontiguousarray([c[0], c[0], c[0]], dtype=np.float32)
col_const_ptr = c.ctypes.data
elif c.shape == (3,):
col_const_ptr = c.ctypes.data
else:
c = _make_contiguous_3d(c, n=n_primitives, extend_scalars=True)
if c is not None: col_ptr = c.ctypes.data
# Prepare radii data
if size_changed and r is None:
r = np.ascontiguousarray([0.05], dtype=np.float32)
if r is not None:
if not isinstance(r, np.ndarray): r = np.ascontiguousarray(r, dtype=np.float32)
if r.dtype != np.float32: r = np.ascontiguousarray(r, dtype=np.float32)
if len(r.shape) > 1: r = r.flatten()
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
radii_ptr = 0
if r is not None:
if r.shape[0] == 1:
r = np.full(n_primitives, r[0], dtype=np.float32)
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
if n_primitives != r.shape[0]:
msg = "Radii (r) shape does not match shape of preceding data arguments."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
radii_ptr = r.ctypes.data
# Prepare U vectors
u = _make_contiguous_3d(u, n=n_primitives)
u_ptr = 0
if u is not None: u_ptr = u.ctypes.data
# Prepare V vectors
v = _make_contiguous_3d(v, n=n_primitives)
v_ptr = 0
if v is not None: v_ptr = v.ctypes.data
# Prepare W vectors
w = _make_contiguous_3d(w, n=n_primitives)
w_ptr = 0
if w is not None: w_ptr = w.ctypes.data
try:
self._padlock.acquire()
self._logger.info("Update %s, %d primitives...", name, n_primitives)
g_handle = self._optix.update_geometry(name, mat, n_primitives,
pos_ptr, col_const_ptr, col_ptr, radii_ptr,
u_ptr, v_ptr, w_ptr)
if (g_handle > 0) and (g_handle == self.geometry_data[name]._handle):
self._logger.info("...done, handle: %d", g_handle)
self.geometry_data[name]._size = n_primitives
else:
msg = "Geometry update failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
def set_data_2d(self, name: str, pos: Optional[Any] = None,
r: Optional[Any] = None, c: Optional[Any] = None,
normals: Optional[Any] = None,
range_x: Optional[Tuple[float, float]] = None,
range_z: Optional[Tuple[float, float]] = None,
floor_y: Optional[float] = None,
floor_c: Optional[Any] = None,
geom: Union[Geometry, str] = Geometry.Mesh,
mat: Optional[str] = None,
make_normals: bool = False) -> None:
"""Create new or update existing surface geometry for the 2D dataset.
Data is provided as 2D array of :math:`z = f(x, y)` values, with the shape ``(n, m)``,
where ``n`` and ``m`` are at least 2. Additional data features can be
visualized with color (array of RGB values, shape ``(n, m, 3)``).
Convention of vertical Y and horizontal XZ plane is adopted.
Note: not all arguments are used to update existing geometry. Update is available for:
``mat``, ``pos``, ``c``, ``r``, ``normals``, ``range_x``, ``range_z``, ``floor_y``,
and ``floor_c`` data.
Parameters
----------
name : string
Name of the new surface geometry.
pos : array_like, optional
Z values of data points.
r : Any, optional
Radii of vertices for the :attr:`plotoptix.enums.Geometry.Graph` geometry,
interpolated along the wireframe edges. Single value sets constant radius
for all vertices.
c : Any, optional
Colors of data points. Single value means a constant gray level.
3-component array means a constant RGB color. Array of the shape
``(n, m, 3)`` will set individual color for each data point,
interpolated between points; ``n`` and ``m`` have to be the same
as in data points shape.
normals : array_like, optional
Surface normal vectors at data points. Array shape has to be ``(n, m, 3)``,
with ``n`` and ``m`` the same as in data points shape.
range_x : tuple (float, float), optional
Data range along X axis. Data array indexes are used if range is
not provided.
range_z : tuple (float, float), optional
Data range along Z axis. Data array indexes are used if range is
not provided.
floor_y : float, optional
Y level of the XZ plane forming the base of the new geometry. Only the
surface is created if ``floor_y`` is not provided.
floor_c : Any, optional
Color of the base volume. Single value or array_like RGB color values.
geom : Geometry enum or string, optional
Geometry of the surface, only :attr:`plotoptix.enums.Geometry.Mesh` or
:attr:`plotoptix.enums.Geometry.Graph` are supported.
mat : string, optional
Material name.
make_normals : bool, optional
Calculate normals for data points, only if not provided with ``normals``
argument. Normals of all triangles attached to the point are averaged.
See Also
--------
:meth:`plotoptix.NpOptiX.update_data_2d`
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if name in self.geometry_data:
self.update_data_2d(name,
mat=mat, pos=pos, r=r, c=c, normals=normals,
range_x=range_x, range_z=range_z,
floor_y=floor_y, floor_c=floor_c)
return
if pos is None:
msg = "pos argument required for new geometries."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if r is None: r = np.ascontiguousarray([0.05], dtype=np.float32)
if c is None: c = np.ascontiguousarray([0.94, 0.94, 0.94], dtype=np.float32)
if mat is None: mat = "diffuse"
if isinstance(geom, str): geom = Geometry[geom]
if not geom in [Geometry.Mesh, Geometry.Graph]:
msg = "Geometry type %s not supported by the surface plot." % geom.name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if not isinstance(pos, np.ndarray): pos = np.ascontiguousarray(pos, dtype=np.float32)
assert len(pos.shape) == 2 and pos.shape[0] > 1 and pos.shape[1] > 1, "Required vertex data shape is (z,x), where z >= 2 and x >= 2."
if pos.dtype != np.float32: pos = np.ascontiguousarray(pos, dtype=np.float32)
if not pos.flags['C_CONTIGUOUS']: pos = np.ascontiguousarray(pos, dtype=np.float32)
pos_ptr = pos.ctypes.data
if r is not None and geom == Geometry.Graph:
if not isinstance(r, np.ndarray): r = np.ascontiguousarray(r, dtype=np.float32)
if r.dtype != np.float32: r = np.ascontiguousarray(r, dtype=np.float32)
if len(r.shape) > 1 or r.shape[0] > 1:
assert r.shape == pos.shape[:2], "Radii shape must be (v,u), with u and v matching the surface points shape."
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
if r is not None and geom == Geometry.Graph:
if r.shape[0] == 1:
r = np.full(pos.shape[:2], r[0], dtype=np.float32)
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
if r.shape != pos.shape[:2]:
msg = "Radii (r) shape does not match the shape of preceding data arguments."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
radii_ptr = r.ctypes.data
else: radii_ptr = 0
n_ptr = 0
if normals is not None:
if not isinstance(normals, np.ndarray): normals = np.ascontiguousarray(normals, dtype=np.float32)
assert len(normals.shape) == 3 and normals.shape == pos.shape + (3,), "Normals shape must be (z,x,3), where (z,x) is the vertex data shape."
if normals.dtype != np.float32: normals = np.ascontiguousarray(normals, dtype=np.float32)
if not normals.flags['C_CONTIGUOUS']: normals = np.ascontiguousarray(normals, dtype=np.float32)
n_ptr = normals.ctypes.data
make_normals = False
c_ptr = 0
c_const = None
if c is not None:
if isinstance(c, float) or isinstance(c, int): c = np.full(3, c, dtype=np.float32)
if not isinstance(c, np.ndarray): c = np.ascontiguousarray(c, dtype=np.float32)
if c.shape == (3,):
c_const = c
cm = np.zeros(pos.shape + (3,), dtype=np.float32)
cm[:,:] = c
c = cm
assert len(c.shape) == 3 and c.shape == pos.shape + (3,), "Colors shape must be (m,n,3), where (m,n) is the vertex data shape."
if c.dtype != np.float32: c = np.ascontiguousarray(c, dtype=np.float32)
if not c.flags['C_CONTIGUOUS']: c = np.ascontiguousarray(c, dtype=np.float32)
c_ptr = c.ctypes.data
make_floor = floor_y is not None
if not make_floor: floor_y = np.float32(np.nan)
cl_ptr = 0
if make_floor:
if floor_c is not None:
if isinstance(floor_c, float) or isinstance(floor_c, int): floor_c = np.full(3, floor_c, dtype=np.float32)
if not isinstance(floor_c, np.ndarray): floor_c = np.ascontiguousarray(floor_c, dtype=np.float32)
if floor_c.shape == (3,):
if floor_c.dtype != np.float32: floor_c = np.ascontiguousarray(floor_c, dtype=np.float32)
if not floor_c.flags['C_CONTIGUOUS']: floor_c = np.ascontiguousarray(floor_c, dtype=np.float32)
cl_ptr = floor_c.ctypes.data
else:
self._logger.warn("Floor color should be a single value or RGB array.")
elif c_const is not None:
floor_c = np.ascontiguousarray(c_const, dtype=np.float32)
cl_ptr = floor_c.ctypes.data
if range_x is None: range_x = (np.float32(np.nan), np.float32(np.nan))
if range_z is None: range_z = (np.float32(np.nan), np.float32(np.nan))
try:
self._padlock.acquire()
self._logger.info("Setup surface %s...", name)
g_handle = self._optix.setup_surface(geom.value, name, mat, pos.shape[1], pos.shape[0], pos_ptr, radii_ptr, n_ptr, c_ptr, cl_ptr,
range_x[0], range_x[1], range_z[0], range_z[1], floor_y, make_normals)
if g_handle > 0:
self._logger.info("...done, handle: %d", g_handle)
self.geometry_data[name] = GeometryMeta(name, g_handle, pos.shape[0] * pos.shape[1])
self.geometry_names[g_handle] = name
else:
msg = "Surface setup failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
def update_data_2d(self, name: str,
mat: Optional[str] = None,
pos: Optional[Any] = None,
r: Optional[Any] = None,
c: Optional[Any] = None,
normals: Optional[Any] = None,
range_x: Optional[Tuple[float, float]] = None,
range_z: Optional[Tuple[float, float]] = None,
floor_y: Optional[float] = None,
floor_c: Optional[Any] = None) -> None:
"""Update surface geometry data or properties.
Parameters
----------
name : string
Name of the surface geometry.
mat : string, optional
Material name.
pos : array_like, optional
Z values of data points.
r : Any, optional
Radii of vertices for the :attr:`plotoptix.enums.Geometry.Graph` geometry,
interpolated along the wireframe edges. Single value sets constant radius
for all vertices.
c : Any, optional
Colors of data points. Single value means a constant gray level.
3-component array means a constant RGB color. Array of the shape
``(n,m,3)`` will set individual color for each data point,
interpolated between points; ``n`` and ``m`` have to be the same
as in data points shape.
normals : array_like, optional
Surface normal vectors at data points. Array shape has to be
``(n,m,3)``, with ``n`` and ``m`` the same as in data points shape.
range_x : tuple (float, float), optional
Data range along X axis.
range_z : tuple (float, float), optional
Data range along Z axis.
floor_y : float, optional
Y level of XZ plane forming the base of the geometry.
floor_c : Any, optional
Color of the base volume. Single value or array_like RGB color values.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if mat is None: mat = ""
if not name in self.geometry_data:
msg = "Surface %s does not exists yet, use set_data_2d() instead." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
s_x = c_uint()
s_z = c_uint()
if not self._optix.get_surface_size(name, byref(s_x), byref(s_z)):
msg = "Cannot get surface %s size." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
size_xz = (s_z.value, s_x.value)
size_changed = False
pos_ptr = 0
if pos is not None:
if not isinstance(pos, np.ndarray): pos = np.ascontiguousarray(pos, dtype=np.float32)
assert len(pos.shape) == 2 and pos.shape[0] > 1 and pos.shape[1] > 1, "Required vertex data shape is (z,x), where z >= 2 and x >= 2."
if pos.dtype != np.float32: pos = np.ascontiguousarray(pos, dtype=np.float32)
if not pos.flags['C_CONTIGUOUS']: pos = np.ascontiguousarray(pos, dtype=np.float32)
if pos.shape != size_xz: size_changed = True
size_xz = pos.shape
pos_ptr = pos.ctypes.data
if size_changed and r is None:
r = np.ascontiguousarray([0.05], dtype=np.float32)
if r is not None:
if not isinstance(r, np.ndarray): r = np.ascontiguousarray(r, dtype=np.float32)
if r.dtype != np.float32: r = np.ascontiguousarray(r, dtype=np.float32)
if len(r.shape) > 1 or r.shape[0] > 1:
assert r.shape == size_xz, "Radii shape must be (x,z), with x and z matching the data points shape."
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
radii_ptr = 0
if r is not None:
if r.shape[0] == 1:
r = np.full(size_xz, r[0], dtype=np.float32)
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
if size_xz != r.shape:
msg = "Radii (r) shape does not match the number of data points."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
radii_ptr = r.ctypes.data
c_ptr = 0
c_const = None
if size_changed and c is None: c = np.ascontiguousarray([0.94, 0.94, 0.94], dtype=np.float32)
if c is not None:
if isinstance(c, float) or isinstance(c, int): c = np.full(3, c, dtype=np.float32)
if not isinstance(c, np.ndarray): c = np.ascontiguousarray(c, dtype=np.float32)
if len(c.shape) == 1 and c.shape[0] == 3:
c_const = c
cm = np.zeros(size_xz + (3,), dtype=np.float32)
cm[:,:] = c
c = cm
assert len(c.shape) == 3 and c.shape == size_xz + (3,), "Colors shape must be (m,n,3), where (m,n) is the vertex data shape."
if c.dtype != np.float32: c = np.ascontiguousarray(c, dtype=np.float32)
if not c.flags['C_CONTIGUOUS']: c = np.ascontiguousarray(c, dtype=np.float32)
c_ptr = c.ctypes.data
n_ptr = 0
if normals is not None:
if not isinstance(normals, np.ndarray): normals = np.ascontiguousarray(normals, dtype=np.float32)
assert len(normals.shape) == 3 and normals.shape == size_xz + (3,), "Normals shape must be (z,x,3), where (z,x) is the vertex data shape."
if normals.dtype != np.float32: normals = np.ascontiguousarray(normals, dtype=np.float32)
if not normals.flags['C_CONTIGUOUS']: normals = np.ascontiguousarray(normals, dtype=np.float32)
n_ptr = normals.ctypes.data
cl_ptr = 0
if floor_c is not None:
if isinstance(floor_c, float) or isinstance(floor_c, int): floor_c = np.full(3, floor_c, dtype=np.float32)
if not isinstance(floor_c, np.ndarray): floor_c = np.ascontiguousarray(floor_c, dtype=np.float32)
if len(floor_c.shape) == 1 and floor_c.shape[0] == 3:
if floor_c.dtype != np.float32: floor_c = np.ascontiguousarray(floor_c, dtype=np.float32)
if not floor_c.flags['C_CONTIGUOUS']: floor_c = np.ascontiguousarray(floor_c, dtype=np.float32)
cl_ptr = floor_c.ctypes.data
else:
self._logger.warn("Floor color should be a single value or RGB array.")
if range_x is None: range_x = (np.float32(np.nan), np.float32(np.nan))
if range_z is None: range_z = (np.float32(np.nan), np.float32(np.nan))
if floor_y is None: floor_y = np.float32(np.nan)
try:
self._padlock.acquire()
self._logger.info("Update surface %s, size (%d, %d)...", name, size_xz[1], size_xz[0])
g_handle = self._optix.update_surface(name, mat, size_xz[1], size_xz[0],
pos_ptr, radii_ptr, n_ptr, c_ptr, cl_ptr,
range_x[0], range_x[1], range_z[0], range_z[1],
floor_y)
if (g_handle > 0) and (g_handle == self.geometry_data[name]._handle):
self._logger.info("...done, handle: %d", g_handle)
self.geometry_data[name]._size = size_xz[0] * size_xz[1]
else:
msg = "Geometry update failed."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
def set_surface(self, name: str, pos: Optional[Any] = None,
r: Optional[Any] = None, c: Optional[Any] = None,
normals: Optional[Any] = None,
geom: Union[Geometry, str] = Geometry.Mesh,
mat: Optional[str] = None,
wrap_u: bool = False,
wrap_v: bool = False,
make_normals: bool = False) -> None:
"""Create new or update existing parametric surface geometry.
Data is provided as 2D array of :math:`[x, y, z] = f(u, v)` values, with the shape
``(n, m, 3)``, where ``n`` and ``m`` are at least 2. Additional data features can be
visualized with color (array of RGB values, shape ``(n, m, 3)``) or wireframe thickness
if the :attr:`plotoptix.enums.Geometry.Graph` geometry is used.
Note: not all arguments are used to update existing geometry. Update is available for:
``mat``, ``pos``, ``c``, ``r``, and ``normals`` data.
Parameters
----------
name : string
Name of the new surface geometry.
pos : array_like, optional
XYZ values of surface points.
r : Any, optional
Radii of vertices for the :attr:`plotoptix.enums.Geometry.Graph` geometry,
interpolated along the wireframe edges. Single value sets constant radius
for all vertices.
c : Any, optional
Colors of surface points. Single value means a constant gray level.
3-component array means a constant RGB color. Array of the shape
``(n, m, 3)`` will set individual color for each surface point,
interpolated between points; ``n`` and ``m`` have to be the same
as in the surface points shape.
normals : array_like, optional
Normal vectors at provided surface points. Array shape has to be ``(n, m, 3)``,
with ``n`` and ``m`` the same as in the surface points shape.
geom : Geometry enum or string, optional
Geometry of the surface, only :attr:`plotoptix.enums.Geometry.Mesh` or
:attr:`plotoptix.enums.Geometry.Graph` are supported.
mat : string, optional
Material name.
wrap_u : bool, optional
Stitch surface edges making U axis continuous.
wrap_v : bool, optional
Stitch surface edges making V axis continuous.
make_normals : bool, optional
Calculate normals for surface points, only if not provided with ``normals``
argument. Normals of all triangles attached to the point are averaged.
See Also
--------
:meth:`plotoptix.NpOptiX.update_surface`
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if name in self.geometry_data:
self.update_surface(name, mat=mat, pos=pos, r=r, c=c, normals=normals)
return
if pos is None:
msg = "pos argument required for new geometries."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if r is None: r = np.ascontiguousarray([0.05], dtype=np.float32)
if c is None: c = np.ascontiguousarray([0.94, 0.94, 0.94], dtype=np.float32)
if mat is None: mat = "diffuse"
if isinstance(geom, str): geom = Geometry[geom]
if not geom in [Geometry.Mesh, Geometry.Graph]:
msg = "Geometry type %s not supported by the parametric surface." % geom.name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if not isinstance(pos, np.ndarray): pos = np.ascontiguousarray(pos, dtype=np.float32)
assert len(pos.shape) == 3 and pos.shape[0] > 1 and pos.shape[1] > 1 and pos.shape[2] == 3, "Required surface points shape is (v,u,3), where u >= 2 and v >= 2."
if pos.dtype != np.float32: pos = np.ascontiguousarray(pos, dtype=np.float32)
if not pos.flags['C_CONTIGUOUS']: pos = np.ascontiguousarray(pos, dtype=np.float32)
pos_ptr = pos.ctypes.data
if r is not None and geom == Geometry.Graph:
if not isinstance(r, np.ndarray): r = np.ascontiguousarray(r, dtype=np.float32)
if r.dtype != np.float32: r = np.ascontiguousarray(r, dtype=np.float32)
if len(r.shape) > 1 or r.shape[0] > 1:
assert r.shape == pos.shape[:2], "Radii shape must be (v,u), with u and v matching the surface points shape."
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
if r is not None and geom == Geometry.Graph:
if r.shape[0] == 1:
r = np.full(pos.shape[:2], r[0], dtype=np.float32)
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
if r.shape != pos.shape[:2]:
msg = "Radii (r) shape does not match the shape of preceding data arguments."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
radii_ptr = r.ctypes.data
else: radii_ptr = 0
n_ptr = 0
if normals is not None and geom == Geometry.Mesh:
if not isinstance(normals, np.ndarray): normals = np.ascontiguousarray(normals, dtype=np.float32)
assert normals.shape == pos.shape, "Normals shape must be (v,u,3), with u and v matching the surface points shape."
if normals.dtype != np.float32: normals = np.ascontiguousarray(normals, dtype=np.float32)
if not normals.flags['C_CONTIGUOUS']: normals = np.ascontiguousarray(normals, dtype=np.float32)
n_ptr = normals.ctypes.data
make_normals = False
c_ptr = 0
c_const_ptr = 0
if c is not None:
if isinstance(c, float) or isinstance(c, int): c = np.full(3, c, dtype=np.float32)
if not isinstance(c, np.ndarray): c = np.ascontiguousarray(c, dtype=np.float32)
if c.dtype != np.float32: c = np.ascontiguousarray(c, dtype=np.float32)
if not c.flags['C_CONTIGUOUS']: c = np.ascontiguousarray(c, dtype=np.float32)
if c.shape == (3,):
c_const_ptr = c.ctypes.data
elif c.shape == pos.shape:
c_ptr = c.ctypes.data
else:
msg = "Colors shape must be (3,) or (v,u,3), with u and v matching the surface points shape."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
try:
self._padlock.acquire()
self._logger.info("Setup surface %s...", name)
g_handle = self._optix.setup_psurface(geom.value, name, mat, pos.shape[1], pos.shape[0], pos_ptr, radii_ptr, n_ptr, c_const_ptr, c_ptr, wrap_u, wrap_v, make_normals)
if g_handle > 0:
self._logger.info("...done, handle: %d", g_handle)
self.geometry_data[name] = GeometryMeta(name, g_handle, pos.shape[0] * pos.shape[1])
self.geometry_names[g_handle] = name
else:
msg = "Surface setup failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
def update_surface(self, name: str,
mat: Optional[str] = None,
pos: Optional[Any] = None,
r: Optional[Any] = None,
c: Optional[Any] = None,
normals: Optional[Any] = None) -> None:
"""Update surface geometry data or properties.
Parameters
----------
name : string
Name of the surface geometry.
mat : string, optional
Material name.
pos : array_like, optional
XYZ values of surface points.
r : Any, optional
Radii of vertices for the :attr:`plotoptix.enums.Geometry.Graph` geometry,
interpolated along the edges. Single value sets constant radius for all vertices.
c : Any, optional
Colors of surface points. Single value means a constant gray level.
3-component array means a constant RGB color. Array of the shape
``(n, m, 3)`` will set individual color for each surface point,
interpolated between points; ``n`` and ``m`` have to be the same
as in the surface points shape.
normals : array_like, optional
Normal vectors at provided surface points. Array shape has to be ``(n, m, 3)``,
with ``n`` and ``m`` the same as in the surface points shape.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if mat is None: mat = ""
if not name in self.geometry_data:
msg = "Surface %s does not exists yet, use set_surface() instead." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
s_u = c_uint()
s_v = c_uint()
if not self._optix.get_surface_size(name, byref(s_u), byref(s_v)):
msg = "Cannot get surface %s size." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
size_uv3 = (s_v.value, s_u.value, 3)
size_uv1 = (s_v.value, s_u.value)
size_changed = False
pos_ptr = 0
if pos is not None:
if not isinstance(pos, np.ndarray): pos = np.ascontiguousarray(pos, dtype=np.float32)
assert len(pos.shape) == 3 and pos.shape[0] > 1 and pos.shape[1] > 1 and pos.shape[2] == 3, "Required vertex data shape is (v,u,3), where u >= 2 and v >= 2."
if pos.dtype != np.float32: pos = np.ascontiguousarray(pos, dtype=np.float32)
if not pos.flags['C_CONTIGUOUS']: pos = np.ascontiguousarray(pos, dtype=np.float32)
if pos.shape != size_uv3: size_changed = True
size_uv3 = pos.shape
pos_ptr = pos.ctypes.data
if size_changed and r is None:
r = np.ascontiguousarray([0.05], dtype=np.float32)
if r is not None:
if not isinstance(r, np.ndarray): r = np.ascontiguousarray(r, dtype=np.float32)
if r.dtype != np.float32: r = np.ascontiguousarray(r, dtype=np.float32)
if len(r.shape) > 1 or r.shape[0] > 1:
assert r.shape == size_uv1, "Radii shape must be (v,u), with u and v matching the surface points shape."
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
radii_ptr = 0
if r is not None:
if r.shape[0] == 1:
r = np.full(size_uv1, r[0], dtype=np.float32)
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
if size_uv1 != r.shape:
msg = "Radii (r) shape does not match the number of surface points."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
radii_ptr = r.ctypes.data
c_ptr = 0
c_const_ptr = 0
if size_changed and c is None: c = np.ascontiguousarray([0.94, 0.94, 0.94], dtype=np.float32)
if c is not None:
if isinstance(c, float) or isinstance(c, int): c = np.full(3, c, dtype=np.float32)
if not isinstance(c, np.ndarray): c = np.ascontiguousarray(c, dtype=np.float32)
if c.dtype != np.float32: c = np.ascontiguousarray(c, dtype=np.float32)
if not c.flags['C_CONTIGUOUS']: c = np.ascontiguousarray(c, dtype=np.float32)
if c.shape == (3,):
c_const_ptr = c.ctypes.data
elif c.shape == size_uv3:
c_ptr = c.ctypes.data
else:
msg = "Colors shape must be (3,) or (v,u,3), with u and v matching the surface points shape."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
n_ptr = 0
if normals is not None:
if not isinstance(normals, np.ndarray): normals = np.ascontiguousarray(normals, dtype=np.float32)
assert normals.shape == size_uv3, "Normals shape must be (v,u,3), with u and v matching the surface points shape."
if normals.dtype != np.float32: normals = np.ascontiguousarray(normals, dtype=np.float32)
if not normals.flags['C_CONTIGUOUS']: normals = np.ascontiguousarray(normals, dtype=np.float32)
n_ptr = normals.ctypes.data
try:
self._padlock.acquire()
self._logger.info("Update surface %s, size (%d, %d)...", name, size_uv1[1], size_uv1[0])
g_handle = self._optix.update_psurface(name, mat, size_uv1[1], size_uv1[0], pos_ptr, radii_ptr, n_ptr, c_const_ptr, c_ptr)
if (g_handle > 0) and (g_handle == self.geometry_data[name]._handle):
self._logger.info("...done, handle: %d", g_handle)
self.geometry_data[name]._size = size_uv1[0] * size_uv1[1]
else:
msg = "Geometry update failed."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
def set_graph(self, name: str,
pos: Optional[Any] = None, edges: Optional[Any] = None,
r: Optional[Any] = None, c: Optional[Any] = None,
mat: Optional[str] = None) -> None:
"""Create new or update existing graph (mesh wireframe) geometry.
Data is provided as vertices :math:`[x, y, z]`, with the shape ``(n, 3)``, and edges
(doublets of vertex indices), with the shape ``(n, 2)`` or ``(m)`` where :math:`m = 2*n`.
Data features can be visualized with colors (array of RGB values assigned to the graph
vertices, shape ``(n, 3)``) and/or vertex radii.
Note: not all arguments are used to update existing geometry. Update is available for:
``mat``, ``pos``, ``edges``, ``r``, and ``c`` data.
Parameters
----------
name : string
Name of the new graph geometry.
pos : array_like, optional
XYZ values of the graph vertices.
edges : array_like, optional
Graph edges as indices (doublets) to vertices in the ``pos`` array.
r : Any, optional
Radii of vertices, interpolated along the edges. Single value sets constant
radius for all vertices.
c : Any, optional
Colors of the graph vertices. Single value means a constant gray level.
3-component array means a constant RGB color. Array of the shape
``(n, 3)`` will set individual color for each vertex, interpolated along
the edges; ``n`` has to be equal to the vertex number in ``pos`` array.
mat : string, optional
Material name.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if name in self.geometry_data:
self.update_graph(name, mat=mat, pos=pos, edges=edges, r=r, c=c)
return
if pos is None or edges is None:
msg = "pos and edges arguments required for new geometries."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if r is None: r = np.ascontiguousarray([0.05], dtype=np.float32)
if c is None: c = np.ascontiguousarray([0.94, 0.94, 0.94], dtype=np.float32)
if mat is None: mat = "diffuse"
if not isinstance(pos, np.ndarray): pos = np.ascontiguousarray(pos, dtype=np.float32)
assert len(pos.shape) == 2 and pos.shape[0] > 1 and pos.shape[1] == 3, "Required vertex data shape is (n,3), where n >= 2."
if pos.dtype != np.float32: pos = np.ascontiguousarray(pos, dtype=np.float32)
if not pos.flags['C_CONTIGUOUS']: pos = np.ascontiguousarray(pos, dtype=np.float32)
pos_ptr = pos.ctypes.data
n_vertices = pos.shape[0]
if not isinstance(edges, np.ndarray): edges = np.ascontiguousarray(edges, dtype=np.int32)
if edges.dtype != np.int32: edges = np.ascontiguousarray(edges, dtype=np.int32)
if not edges.flags['C_CONTIGUOUS']: edges = np.ascontiguousarray(edges, dtype=np.int32)
assert (len(edges.shape) == 2 and edges.shape[1] == 2) or (len(edges.shape) == 1 and (edges.shape[0] % 2 == 0)), "Required index shape is (n,2) or (m), where m is a multiple of 2."
edges_ptr = edges.ctypes.data
n_edges = edges.size // 2
if r is not None:
if not isinstance(r, np.ndarray): r = np.ascontiguousarray(r, dtype=np.float32)
if r.dtype != np.float32: r = np.ascontiguousarray(r, dtype=np.float32)
if len(r.shape) > 1: r = r.flatten()
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
if r is not None:
if r.shape[0] == 1:
if n_vertices > 0: r = np.full(n_vertices, r[0], dtype=np.float32)
else:
msg = "Cannot resolve proper radii (r) shape from preceding data arguments."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
if (n_vertices > 0) and (n_vertices != r.shape[0]):
msg = "Radii (r) shape does not match the shape of preceding data arguments."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
radii_ptr = r.ctypes.data
else: radii_ptr = 0
c = np.ascontiguousarray(c, dtype=np.float32)
if c.shape == (1,):
c = np.ascontiguousarray([c[0], c[0], c[0]], dtype=np.float32)
col_const_ptr = c.ctypes.data
col_ptr = 0
elif c.shape == (3,):
col_const_ptr = c.ctypes.data
col_ptr = 0
else:
c = _make_contiguous_3d(c, n=n_vertices, extend_scalars=True)
assert c.shape == pos.shape, "Colors shape must be (n,3), with n matching the number of graph vertices."
if c is not None: col_ptr = c.ctypes.data
else: col_ptr = 0
col_const_ptr = 0
try:
self._padlock.acquire()
self._logger.info("Setup graph %s...", name)
g_handle = self._optix.setup_graph(name, mat, n_vertices, n_edges, pos_ptr, radii_ptr, edges_ptr, col_const_ptr, col_ptr)
if g_handle > 0:
self._logger.info("...done, handle: %d", g_handle)
self.geometry_data[name] = GeometryMeta(name, g_handle, n_vertices)
self.geometry_names[g_handle] = name
else:
msg = "Graph setup failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
def update_graph(self, name: str,
mat: Optional[str] = None,
pos: Optional[Any] = None,
edges: Optional[Any] = None,
r: Optional[Any] = None,
c: Optional[Any] = None) -> None:
"""Update data of an existing graph (mesh wireframe) geometry.
All data or only selected arrays may be updated. If vertices and edges are left
unchanged then ``c`` and ``r`` array sizes should match the size of the graph,
i.e. the existing ``pos`` shape.
Parameters
----------
name : string
Name of the graph geometry.
mat : string, optional
Material name.
pos : array_like, optional
XYZ values of the graph vertices.
edges : array_like, optional
Graph edges as indices (doublets) to the ``pos`` array.
r : Any, optional
Radii of vertices, interpolated along the edges. Single value sets
constant radius for all vertices.
c : Any, optional
Colors of graph vertices. Single value means a constant gray level.
3-component array means a constant RGB color. Array of the shape
``(n, 3)`` will set individual color for each vertex,
interpolated along edges; ``n`` has to be equal to the vertex
number in ``pos`` array.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if mat is None: mat = ""
if not name in self.geometry_data:
msg = "Graph %s does not exists yet, use set_graph() instead." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
m_vertices = self._optix.get_geometry_size(name)
#m_edges = self._optix.get_edges_count(name)
size_changed = False
pos_ptr = 0
n_vertices = 0
if pos is not None:
if not isinstance(pos, np.ndarray): pos = np.ascontiguousarray(pos, dtype=np.float32)
assert len(pos.shape) == 2 and pos.shape[0] > 1 and pos.shape[1] == 3, "Required vertex data shape is (n,3), where n >= 2."
if pos.dtype != np.float32: pos = np.ascontiguousarray(pos, dtype=np.float32)
if not pos.flags['C_CONTIGUOUS']: pos = np.ascontiguousarray(pos, dtype=np.float32)
if pos.shape[0] != m_vertices: size_changed = True
pos_ptr = pos.ctypes.data
n_vertices = pos.shape[0]
m_vertices = n_vertices
edges_ptr = 0
n_edges = 0
if edges is not None:
if not isinstance(edges, np.ndarray): edges = np.ascontiguousarray(edges, dtype=np.int32)
if edges.dtype != np.int32: edges = np.ascontiguousarray(edges, dtype=np.int32)
if not edges.flags['C_CONTIGUOUS']: edges = np.ascontiguousarray(edges, dtype=np.int32)
assert (len(edges.shape) == 2 and edges.shape[1] == 2) or (len(edges.shape) == 1 and (edges.shape[0] % 2 == 0)), "Required index shape is (n,2) or (m), where m is a multiple of 2."
edges_ptr = edges.ctypes.data
n_edges = edges.size // 2
#m_edges = n_edges
if size_changed and r is None:
r = np.ascontiguousarray([0.05], dtype=np.float32)
if r is not None:
if not isinstance(r, np.ndarray): r = np.ascontiguousarray(r, dtype=np.float32)
if r.dtype != np.float32: r = np.ascontiguousarray(r, dtype=np.float32)
if len(r.shape) > 1: r = r.flatten()
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
radii_ptr = 0
if r is not None:
if r.shape[0] == 1:
r = np.full(m_vertices, r[0], dtype=np.float32)
if not r.flags['C_CONTIGUOUS']: r = np.ascontiguousarray(r, dtype=np.float32)
if m_vertices != r.shape[0]:
msg = "Radii (r) shape does not match the number of graph vertices."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
radii_ptr = r.ctypes.data
c_ptr = 0
c_const_ptr = 0
if size_changed and c is None: c = np.ascontiguousarray([0.94, 0.94, 0.94], dtype=np.float32)
if c is not None:
if isinstance(c, float) or isinstance(c, int): c = np.full(3, c, dtype=np.float32)
if not isinstance(c, np.ndarray): c = np.ascontiguousarray(c, dtype=np.float32)
if c.dtype != np.float32: c = np.ascontiguousarray(c, dtype=np.float32)
if not c.flags['C_CONTIGUOUS']: c = np.ascontiguousarray(c, dtype=np.float32)
if c.shape == (3,):
c_const_ptr = c.ctypes.data
elif c.shape == (m_vertices, 3):
c_ptr = c.ctypes.data
else:
msg = "Colors shape must be (n,3), with n matching the number of graph vertices."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
try:
self._padlock.acquire()
self._logger.info("Update graph %s...", name)
g_handle = self._optix.update_graph(name, mat, m_vertices, n_edges, pos_ptr, radii_ptr, edges_ptr, c_const_ptr, c_ptr)
if (g_handle > 0) and (g_handle == self.geometry_data[name]._handle):
self._logger.info("...done, handle: %d", g_handle)
self.geometry_data[name]._size = m_vertices
else:
msg = "Graph update failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
def set_mesh(self, name: str, pos: Optional[Any] = None, faces: Optional[Any] = None,
c: Any = np.ascontiguousarray([0.94, 0.94, 0.94], dtype=np.float32),
normals: Optional[Any] = None,
nidx: Optional[Any] = None,
uvmap: Optional[Any] = None,
uvidx: Optional[Any] = None,
mat: str = "diffuse",
make_normals: bool = False) -> None:
"""Create new or update existing mesh geometry.
Data is provided as vertices :math:`[x, y, z]`, with the shape ``(n, 3)``, and faces
(triplets of vertex indices), with the shape ``(n, 3)`` or ``(m)`` where :math:`m = 3*n`.
Data features can be visualized with color (array of RGB values assigned to the mesh
vertices, shape ``(n, 3)``).
Mesh ``normals`` can be provided as an array of 3D vectors. Mapping of normals to
faces can be provided as an array of ``nidx`` indexes. If mapping is not provided
then face vertex data is used (requires same number of vertices and normal vectors).
Smooth shading normals can be pre-calculated if ``make_normals=True`` and normals
data is not provided.
Texture UV mapping ``uvmap`` can be provided as an array of 2D vectors. Mapping of
UV coordinates to faces can be provided as an array of ``uvidx`` indexes. If mapping
is not provided then face vertex data is used (requires same number of vertices
and UV points).
Note: not all arguments are used to update existing geometry. Update is available for:
``mat``, ``pos``, ``faces``, ``c``, ``normals``, ``nidx``, ``uvmap`` and ``uvidx`` data.
Parameters
----------
name : string
Name of the new mesh geometry.
pos : array_like, optional
XYZ values of the mesh vertices.
faces : array_like, optional
Mesh faces as indices (triplets) to the ``pos`` array.
c : Any, optional
Colors of mesh vertices. Single value means a constant gray level.
3-component array means a constant RGB color. Array of the shape
``(n, 3)`` will set individual color for each vertex,
interpolated on face surfaces; ``n`` has to be equal to the vertex
number in ``pos`` array.
normals : array_like, optional
Normal vectors.
nidx : array_like, optional
Normal to face mapping, ``faces`` is used if not provided.
uvmap : array_like, optional
Texture UV coordinates.
uvidx : array_like, optional
Texture UV to face mapping, ``faces`` is used if not provided.
mat : string, optional
Material name.
make_normals : bool, optional
Calculate smooth shading of the mesh, only if ``normals`` are not provided.
Normals of all triangles attached to the mesh vertex are averaged.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if name in self.geometry_data:
self.update_mesh(name, mat=mat, pos=pos, faces=faces,
c=c, normals=normals, nidx=nidx, uvmap=uvmap,
uvidx=uvidx)
return
if pos is None or faces is None:
msg = "pos and faces arguments required for new geometries."
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
if not isinstance(pos, np.ndarray): pos = np.ascontiguousarray(pos, dtype=np.float32)
assert len(pos.shape) == 2 and pos.shape[0] > 2 and pos.shape[1] == 3, "Required vertex data shape is (n,3), where n >= 3."
if pos.dtype != np.float32: pos = np.ascontiguousarray(pos, dtype=np.float32)
if not pos.flags['C_CONTIGUOUS']: pos = np.ascontiguousarray(pos, dtype=np.float32)
pos_ptr = pos.ctypes.data
n_vertices = pos.shape[0]
if not isinstance(faces, np.ndarray): faces = np.ascontiguousarray(faces, dtype=np.int32)
if faces.dtype != np.int32: faces = np.ascontiguousarray(faces, dtype=np.int32)
if not faces.flags['C_CONTIGUOUS']: faces = np.ascontiguousarray(faces, dtype=np.int32)
assert (len(faces.shape) == 2 and faces.shape[1] == 3) or (len(faces.shape) == 1 and (faces.shape[0] % 3 == 0)), "Required index shape is (n,3) or (m), where m is a multiple of 3."
faces_ptr = faces.ctypes.data
n_faces = faces.size // 3
n_colors = 0
c = np.ascontiguousarray(c, dtype=np.float32)
if c.shape == (1,):
c = np.ascontiguousarray([c[0], c[0], c[0]], dtype=np.float32)
col_const_ptr = c.ctypes.data
col_ptr = 0
elif c.shape == (3,):
col_const_ptr = c.ctypes.data
col_ptr = 0
else:
c = _make_contiguous_3d(c, n=n_vertices, extend_scalars=True)
n_colors = c.shape[0]
assert n_colors == n_vertices or n_colors == n_faces, "Colors shape must be (n,3), with n matching the number of mesh vertices or faces."
if c is not None: col_ptr = c.ctypes.data
else: col_ptr = 0
col_const_ptr = 0
n_ptr = 0
n_normals = 0
if normals is not None:
if not isinstance(normals, np.ndarray): normals = np.ascontiguousarray(normals, dtype=np.float32)
if nidx is None:
assert normals.shape == pos.shape, "If normal index data not provided, normals shape must be (n,3), with n matching the mesh vertex positions shape."
else:
assert len(normals.shape) == 2 and normals.shape[0] > 2 and normals.shape[1] == 3, "Required normals data shape is (n,3), where n >= 3."
if normals.dtype != np.float32: normals = np.ascontiguousarray(normals, dtype=np.float32)
if not normals.flags['C_CONTIGUOUS']: normals = np.ascontiguousarray(normals, dtype=np.float32)
n_ptr = normals.ctypes.data
n_normals = normals.shape[0]
make_normals = False
nidx_ptr = 0
if nidx is not None:
if not isinstance(nidx, np.ndarray): nidx = np.ascontiguousarray(nidx, dtype=np.int32)
if nidx.dtype != np.int32: nidx = np.ascontiguousarray(nidx, dtype=np.int32)
if not nidx.flags['C_CONTIGUOUS']: nidx = np.ascontiguousarray(nidx, dtype=np.int32)
assert np.array_equal(nidx.shape, faces.shape), "Required same shape of normal index and face index arrays."
nidx_ptr = nidx.ctypes.data
uv_ptr = 0
n_uv = 0
if uvmap is not None:
if not isinstance(uvmap, np.ndarray): uvmap = np.ascontiguousarray(uvmap, dtype=np.float32)
if uvidx is None:
assert uvmap.shape[0] == pos.shape[0], "If UV index data not provided, uvmap shape must be (n,2), with n matching the number of mesh vertices."
else:
assert len(uvmap.shape) == 2 and uvmap.shape[0] > 2 and uvmap.shape[1] == 2, "Required UV data shape is (n,2), where n >= 3."
if uvmap.dtype != np.float32: uvmap = np.ascontiguousarray(uvmap, dtype=np.float32)
if not uvmap.flags['C_CONTIGUOUS']: uvmap = np.ascontiguousarray(uvmap, dtype=np.float32)
uv_ptr = uvmap.ctypes.data
n_uv = uvmap.shape[0]
uvidx_ptr = 0
if uvidx is not None:
if not isinstance(uvidx, np.ndarray): uvidx = np.ascontiguousarray(uvidx, dtype=np.int32)
if uvidx.dtype != np.int32: uvidx = np.ascontiguousarray(uvidx, dtype=np.int32)
if not uvidx.flags['C_CONTIGUOUS']: uvidx = np.ascontiguousarray(uvidx, dtype=np.int32)
assert np.array_equal(uvidx.shape, faces.shape), "Required same shape of UV index and face index arrays."
uvidx_ptr = uvidx.ctypes.data
try:
self._padlock.acquire()
self._logger.info("Setup mesh %s...", name)
g_handle = self._optix.setup_mesh(name, mat, n_vertices, n_faces, n_colors, n_normals, n_uv, pos_ptr, faces_ptr, col_const_ptr, col_ptr, n_ptr, nidx_ptr, uv_ptr, uvidx_ptr, make_normals)
if g_handle > 0:
self._logger.info("...done, handle: %d", g_handle)
self.geometry_data[name] = GeometryMeta(name, g_handle, n_vertices)
self.geometry_names[g_handle] = name
else:
msg = "Mesh setup failed."
self._logger.error(msg)
if self._raise_on_error: raise RuntimeError(msg)
except Exception as e:
self._logger.error(str(e))
if self._raise_on_error: raise
finally:
self._padlock.release()
def update_mesh(self, name: str,
mat: Optional[str] = None,
pos: Optional[Any] = None,
faces: Optional[Any] = None,
c: Optional[Any] = None,
normals: Optional[Any] = None,
nidx: Optional[Any] = None,
uvmap: Optional[Any] = None,
uvidx: Optional[Any] = None) -> None:
"""Update data of an existing mesh geometry.
All data or only some of the arrays may be updated. If vertices and faces are left
unchanged then the other array sizes should match the sizes of the mesh, i.e. ``c``
shape should match existing ``pos`` shape, ``nidx`` and ``uvidx`` shapes should
match ``faces`` shape or if index mapping is not provided then ``normals`` and
``uvmap`` shapes should match ``pos`` shape.
Parameters
----------
name : string
Name of the mesh geometry.
mat : string, optional
Material name.
pos : array_like, optional
XYZ values of the mesh vertices.
faces : array_like, optional
Mesh faces as indices (triplets) to the ``pos`` array.
c : Any, optional
Colors of mesh vertices. Single value means a constant gray level.
3-component array means a constant RGB color. Array of the shape
``(n, 3)`` will set individual color for each vertex,
interpolated on face surfaces; ``n`` has to be equal to the vertex
number in ``pos`` array.
normals : array_like, optional
Normal vectors.
nidx : array_like, optional
Normal to face mapping, existing mesh ``faces`` is used if not provided.
uvmap : array_like, optional
Texture UV coordinates.
uvidx : array_like, optional
Texture UV to face mapping, existing mesh ``faces`` is used if not provided.
"""
if name is None: raise ValueError()
if not isinstance(name, str): name = str(name)
if mat is None: mat = ""
if not name in self.geometry_data:
msg = "Mesh %s does not exists yet, use set_mesh() instead." % name
self._logger.error(msg)
if self._raise_on_error: raise ValueError(msg)
return
m_vertices = self._optix.get_geometry_size(name)
m_faces = self._optix.get_faces_count(name)
size_changed = False
pos_ptr = 0
n_vertices = 0
if pos is not None:
if not isinstance(pos, np.ndarray): pos = np.ascontiguousarray(pos, dtype=np.float32)
assert len(pos.shape) == 2 and pos.shape[0] > 2 and pos.shape[1] == 3, "Required vertex data shape is (n,3), where n >= 3."
if pos.dtype != np.float32: pos = np.ascontiguousarray(pos, dtype=np.float32)
if not pos.flags['C_CONTIGUOUS']: pos = np.ascontiguousarray(pos, dtype=np.float32)
if pos.shape[0] != m_vertices: size_changed = True
pos_ptr = pos.ctypes.data
n_vertices = pos.shape[0]
m_vertices = n_vertices
faces_ptr = 0
n_faces = 0
if faces is not None:
if not isinstance(faces, np.ndarray): faces = np.ascontiguousarray(faces, dtype=np.int32)
if faces.dtype != np.int32: faces = np.ascontiguousarray(faces, dtype=np.int32)
if not faces.flags['C_CONTIGUOUS']: faces = np.ascontiguousarray(faces, dtype=np.int32)
assert (len(faces.shape) == 2 and faces.shape[1] == 3) or (len(faces.shape) == 1 and (faces.shape[0] % 3 == 0)), "Required index shape is (n,3) or (m), where m is a multiple of 3."
faces_ptr = faces.ctypes.data
n_faces = faces.size // 3
m_faces = n_faces
c_ptr = 0
c_const_ptr = 0
if size_changed and c is None: c = np.ascontiguousarray([0.94, 0.94, 0.94], dtype=np.float32)
if c is not None:
if isinstance(c, float) or isinstance(c, int): c = np.full(3, c, dtype=np.float32)
if not isinstance(c, np.ndarray): c = np.ascontiguousarray(c, dtype=np.float32)
if c.dtype != np.float32: c = np.ascontiguousarray(c, dtype=np.float32)
|
numpy.ascontiguousarray
|
#!/usr/bin/env python
import os
import copy
import pandas as pd
import numpy as np
import scipy.linalg as scipy_linalg
import matplotlib.pyplot as plt
# from scipy import signal
# import pandas as pd
# import datetime
# from toopazo_tools.time_series import TimeseriesTools as TSTools
from toopazo_tools.matplotlib import PlotTools, FigureTools
from toopazo_tools.pandas import PandasTools
# Check if this is running inside toopazo_ulg/ or deployed as a module
if os.path.isfile('parse_file.py'):
from parse_file import UlgParser
from plot_basics import UlgPlotBasics
else:
from toopazo_ulg.parse_file import UlgParser
from toopazo_ulg.plot_basics import UlgPlotBasics
class UlgPlotMixer(UlgPlotBasics):
"""
The purpose of plot_mixer.py is to run a linear fit, based on
least-squares error, with
input = xvect = actuator_outputs
output = yvect = actuator_controls
The important function is
[lsq_matrix, lsq_bias, lsq_error] =
UlgPlotMixer.least_square_fit(xvect, yvect)
and the prediction is output = lsq_matrix*input + lsq_bias.
The topic toopazo_ctrlalloc contains both the input and the output and is
used in the calculations.
firefly_cifer = used by plot_sysid.py
firefly_mixer = used by plot_mixer.py for estimation/evaluation
housefly_mixer1 = used by plot_mixer.py for estimation/evaluation
housefly_mixer2 = used by plot_mixer.py for estimation/evaluation
"""
@staticmethod
def check_data(data, vmin, vmax):
# print('[check_data] type(data) %s' % type(data))
data = np.array(data)
shape_tuple = data.shape
if len(shape_tuple) == 1:
UlgPlotMixer.check_data1d(data, vmin, vmax)
else:
UlgPlotMixer.check_data2d(data, vmin, vmax)
@staticmethod
def check_data2d(data2d, vmin, vmax):
data2d = np.array(data2d)
|
numpy.array
|
from PyQt5 import QtWidgets, QtGui, QtCore, uic
from pyqtgraph import PlotWidget
import pyqtgraph as pg
import sys
import math
import itertools
import numpy as np
import pyaudio
#import modulated_oscillator as m_o
import oscillators
import components
#import envelopes
BUFFER_SIZE = 256
SAMPLE_RATE = 44100
NOTE_AMP = 1
def get_sin_oscillator(freq, amp=1, phase=0, sample_rate=44100):
phase = (phase / 360) * 2 * math.pi
increment = (2 * math.pi * freq)/ sample_rate
return (math.sin(phase + v) * amp for v in itertools.count(start=0, step=increment))
def get_samples(notes_dict, num_samples=BUFFER_SIZE):
return [sum([int(next(osc) * 32767) \
for _, osc in notes_dict.items()]) \
for _ in range(num_samples)]
def osc_function(freq, amp, sample_rate, attack_duration, decay_duration, sustain_level, \
release_duration=0.3):
return iter(
components.Chain(
oscillators.SineOscillator(freq=freq,
amp=amp, sample_rate=sample_rate),
components.modifiers.ModulatedVolume(
components.envelopes.ADSREnvelope(attack_duration=(float(attack_duration)) / 10000, decay_duration=(float(decay_duration)) / 10000, sustain_level=(float(sustain_level)) / 10,
release_duration=(float(release_duration)) / 10000, sample_rate=sample_rate)
)
)
)
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
#Load the UI Page
uic.loadUi('C:\\Users\\Sam\\AppData\\Local\\Programs\\Python\\Python38\\Music_Programs\\Music_Programs\\ADSR_Grapher\\embeddedGraphTest.ui', self) #TODO change to relative path
#freq line edit val
self.onlyDouble = QtGui.QDoubleValidator()
self.freqLineEdit.setValidator(self.onlyDouble)
self.notes_dict = {}
self.stream = pyaudio.PyAudio().open(
rate=SAMPLE_RATE,
channels=1,
format=pyaudio.paInt16,
output=True,
frames_per_buffer=BUFFER_SIZE
)
#init graph
self.sample_rate = 2048
self.osc = get_sin_oscillator(freq=1, sample_rate=512)
self.samples = [next(self.osc) for i in range(self.sample_rate*4)]
self.attack_length = self.sample_rate
self.decay_length = self.sample_rate
self.sustain_level = .4
self.sustain_length = self.sample_rate
self.release_length = self.sample_rate
self.attack = np.linspace(0, 1, self.attack_length)
self.decay = np.linspace(1, self.sustain_level, self.decay_length)
self.sustain = np.full((self.sustain_length, ), self.sustain_level)
self.release = np.linspace(self.sustain_level, 0, self.release_length)
self.adsr = np.concatenate( (self.attack, self.decay,self.sustain,self.release) )
self.graphWidget_ADSR.setTitle("ADSR Curve")
self.pen = pg.mkPen(color=(255, 0, 0))
self.attack_start = 0
self.attack_end = len(self.attack)
self.attack_line = self.graphWidget_ADSR.plot(range(self.attack_start,self.attack_end), self.attack, pen=self.pen)
self.attack_line_main = self.graphWidget_ADSR_SIG.plot(range(self.attack_start,self.attack_end), self.attack, pen=self.pen)
self.pen = pg.mkPen(color=(0, 255, 255))
self.decay_start = len(self.attack)
self.decay_end = len(self.attack)+len(self.decay)
self.decay_line = self.graphWidget_ADSR.plot(range(self.decay_start,self.decay_end), self.decay, pen=self.pen)
self.decay_line_main = self.graphWidget_ADSR_SIG.plot(range(self.decay_start,self.decay_end), self.decay, pen=self.pen)
self.pen = pg.mkPen(color=(255, 125, 0))
self.sus_start = len(self.attack)+len(self.decay)
self.sus_end = len(self.attack)+len(self.decay)+len(self.sustain)
self.sus_line = self.graphWidget_ADSR.plot(range(self.sus_start,self.sus_end), self.sustain, pen=self.pen)
self.sus_line_main = self.graphWidget_ADSR_SIG.plot(range(self.sus_start,self.sus_end), self.sustain, pen=self.pen)
self.pen = pg.mkPen(color=(0, 255, 125))
self.rel_start = len(self.attack)+len(self.decay)+len(self.sustain)
self.rel_end = len(self.attack)+len(self.decay)+len(self.sustain)+len(self.release)
self.rel_line = self.graphWidget_ADSR.plot(range(self.rel_start,self.rel_end), self.release, pen=self.pen)
self.rel_line_main = self.graphWidget_ADSR_SIG.plot(range(self.rel_start,self.rel_end), self.release, pen=self.pen)
self.graphWidget_SIG.setTitle("Input Signal Curve")
self.SIG_line = self.graphWidget_SIG.plot(range(0,self.sample_rate*4), self.samples)
self.graphWidget_ADSR_SIG.setTitle("ADSR * Input Signal Curve")
self.ADSR_SIG_line = self.graphWidget_ADSR_SIG.plot(range(0,len(self.adsr)), self.adsr*self.samples)
#button to play sound
self.b1.clicked.connect(self.play_sound)
self.b2.clicked.connect(self.play_ADSRsound)
#timer for updating graphs
self.timer = QtCore.QTimer()
self.timer.setInterval(50)
self.timer.timeout.connect(self.update_plot_data)
self.timer.start()
def play_sound(self):
self.notes_dict[0] = get_sin_oscillator(freq=float(self.freqLineEdit.text()), amp=1) #type generator
samples = get_samples(self.notes_dict)
samples =
|
np.int16(samples)
|
numpy.int16
|
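# --- Illustrative note (not part of the dataset rows above/below) ---
# Hedged sketch of the playback step the truncated prompt above leads up to:
# summed oscillator samples are converted to 16-bit PCM and written to the
# PyAudio stream as raw bytes. Buffer size and names mirror the snippet, but
# the function itself is illustrative.
import numpy as np

def write_buffer(stream, notes_dict, num_samples=256):
    samples = [sum(int(next(osc) * 32767) for osc in notes_dict.values())
               for _ in range(num_samples)]
    pcm = np.int16(samples)        # signed 16-bit PCM expected by paInt16
    stream.write(pcm.tobytes())    # PyAudio streams accept raw bytes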
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from batchgenerators.augmentations.utils import random_crop_2D_image_batched, pad_nd_image
import numpy as np
from batchgenerators.dataloading import SlimDataLoaderBase
from multiprocessing import Pool
import time
from nnunet.configuration import default_num_threads
from nnunet.paths import preprocessing_output_dir
from batchgenerators.utilities.file_and_folder_operations import *
def get_case_identifiers(folder):
case_identifiers = [i[:-4] for i in os.listdir(folder) if i.endswith("npz") and (i.find("segFromPrevStage") == -1)]
return case_identifiers
def get_case_identifiers_from_raw_folder(folder):
case_identifiers = np.unique(
[i[:-12] for i in os.listdir(folder) if i.endswith(".nii.gz") and (i.find("segFromPrevStage") == -1)])
return case_identifiers
def convert_to_npy(args):
if not isinstance(args, tuple):
key = "data"
npz_file = args
else:
npz_file, key = args
if not isfile(npz_file[:-3] + "npy"):
a = np.load(npz_file)[key]
np.save(npz_file[:-3] + "npy", a)
def save_as_npz(args):
if not isinstance(args, tuple):
key = "data"
npy_file = args
else:
npy_file, key = args
d = np.load(npy_file)
np.savez_compressed(npy_file[:-3] + "npz", **{key: d})
def unpack_dataset(folder, threads=default_num_threads, key="data"):
"""
unpacks all npz files in a folder to npy (whatever you want to have unpacked must be saved under key)
:param folder:
:param threads:
:param key:
:return:
"""
p = Pool(threads)
npz_files = subfiles(folder, True, None, ".npz", True)
p.map(convert_to_npy, zip(npz_files, [key] * len(npz_files)))
p.close()
p.join()
def pack_dataset(folder, threads=default_num_threads, key="data"):
p = Pool(threads)
npy_files = subfiles(folder, True, None, ".npy", True)
p.map(save_as_npz, zip(npy_files, [key] * len(npy_files)))
p.close()
p.join()
def delete_npy(folder):
case_identifiers = get_case_identifiers(folder)
npy_files = [join(folder, i + ".npy") for i in case_identifiers]
npy_files = [i for i in npy_files if isfile(i)]
for n in npy_files:
os.remove(n)
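# Hedged usage sketch (illustrative path, not from the original module):
# if __name__ == '__main__':
#     folder = '/path/to/nnUNet_preprocessed/TaskXXX/stage0'
#     unpack_dataset(folder, threads=default_num_threads, key='data')
#     # ... train on the fast .npy copies ...
#     delete_npy(folder)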
def load_dataset(folder, num_cases_properties_loading_threshold=1000):
# we don't load the actual data but instead return the filename to the np file.
print('loading dataset')
case_identifiers = get_case_identifiers(folder)
case_identifiers.sort()
dataset = OrderedDict()
for c in case_identifiers:
dataset[c] = OrderedDict()
dataset[c]['data_file'] = join(folder, "%s.npz" % c)
# dataset[c]['properties'] = load_pickle(join(folder, "%s.pkl" % c))
dataset[c]['properties_file'] = join(folder, "%s.pkl" % c)
if dataset[c].get('seg_from_prev_stage_file') is not None:
dataset[c]['seg_from_prev_stage_file'] = join(folder, "%s_segs.npz" % c)
if len(case_identifiers) <= num_cases_properties_loading_threshold:
print('loading all case properties')
for i in dataset.keys():
dataset[i]['properties'] = load_pickle(dataset[i]['properties_file'])
return dataset
def crop_2D_image_force_fg(img, crop_size, valid_voxels):
"""
img must be [c, x, y]
img[-1] must be the segmentation with segmentation>0 being foreground
:param img:
:param crop_size:
:param valid_voxels: voxels belonging to the selected class
:return:
"""
assert len(valid_voxels.shape) == 2
if type(crop_size) not in (tuple, list):
crop_size = [crop_size] * (len(img.shape) - 1)
else:
assert len(crop_size) == (len(
img.shape) - 1), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (3d)"
# we need to find the center coords that we can crop to without exceeding the image border
lb_x = crop_size[0] // 2
ub_x = img.shape[1] - crop_size[0] // 2 - crop_size[0] % 2
lb_y = crop_size[1] // 2
ub_y = img.shape[2] - crop_size[1] // 2 - crop_size[1] % 2
if len(valid_voxels) == 0:
selected_center_voxel = (np.random.random_integers(lb_x, ub_x),
np.random.random_integers(lb_y, ub_y))
else:
selected_center_voxel = valid_voxels[np.random.choice(valid_voxels.shape[1]), :]
selected_center_voxel = np.array(selected_center_voxel)
for i in range(2):
selected_center_voxel[i] = max(crop_size[i] // 2, selected_center_voxel[i])
selected_center_voxel[i] = min(img.shape[i + 1] - crop_size[i] // 2 - crop_size[i] % 2,
selected_center_voxel[i])
result = img[:, (selected_center_voxel[0] - crop_size[0] // 2):(
selected_center_voxel[0] + crop_size[0] // 2 + crop_size[0] % 2),
(selected_center_voxel[1] - crop_size[1] // 2):(
selected_center_voxel[1] + crop_size[1] // 2 + crop_size[1] % 2)]
return result
class OriDataLoader3D(SlimDataLoaderBase):
def __init__(self, data, patch_size, final_patch_size, batch_size, has_prev_stage=False,
oversample_foreground_percent=0.0, memmap_mode="r", pad_mode="edge", pad_kwargs_data=None,
pad_sides=None):
"""
This is the basic data loader for 3D networks. It uses preprocessed data as produced by my (Fabian) preprocessing.
You can load the data with load_dataset(folder) where folder is the folder where the npz files are located. If there
are only npz files present in that folder, the data loader will unpack them on the fly. This may take a while
and increase CPU usage. Therefore, I advise you to call unpack_dataset(folder) first, which will unpack all npz
to npy. Don't forget to call delete_npy(folder) after you are done with training?
Why all the hassle? Well the decathlon dataset is huge. Using npy for everything will consume >1 TB and that is uncool
given that I (Fabian) will have to store that permanently on /datasets and my local computer. With this strategy all
data is stored in a compressed format (factor 10 smaller) and only unpacked when needed.
:param data: get this with load_dataset(folder, stage=0). Plug the return value in here and you are g2g (good to go)
:param patch_size: what patch size will this data loader return? it is common practice to first load larger
patches so that a central crop after data augmentation can be done to reduce border artifacts. If unsure, use
get_patch_size() from data_augmentation.default_data_augmentation
:param final_patch_size: what will the patch finally be cropped to (after data augmentation)? this is the patch
size that goes into your network. We need this here because we will pad patients in here so that patches at the
border of patients are sampled properly
:param batch_size:
:param num_batches: how many batches will the data loader produce before stopping? None=endless
:param seed:
:param stage: ignore this (Fabian only)
:param random: Sample keys randomly; CAREFUL! non-random sampling requires batch_size=1, otherwise you will iterate batch_size times over the dataset
:param oversample_foreground: half the batch will be forced to contain at least some foreground (equal prob for each of the foreground classes)
"""
super(OriDataLoader3D, self).__init__(data, batch_size, None)
if pad_kwargs_data is None:
pad_kwargs_data = OrderedDict()
self.pad_kwargs_data = pad_kwargs_data
self.pad_mode = pad_mode
self.oversample_foreground_percent = oversample_foreground_percent
self.final_patch_size = final_patch_size
self.has_prev_stage = has_prev_stage
self.patch_size = patch_size
self.list_of_keys = list(self._data.keys())
# need_to_pad denotes by how much we need to pad the data so that if we sample a patch of size final_patch_size
# (which is what the network will get) these patches will also cover the border of the patients
self.need_to_pad = (np.array(patch_size) - np.array(final_patch_size)).astype(int)
if pad_sides is not None:
if not isinstance(pad_sides, np.ndarray):
pad_sides = np.array(pad_sides)
self.need_to_pad += pad_sides
self.memmap_mode = memmap_mode
self.num_channels = None
self.pad_sides = pad_sides
self.data_shape, self.seg_shape = self.determine_shapes()
def get_do_oversample(self, batch_idx):
return not batch_idx < round(self.batch_size * (1 - self.oversample_foreground_percent))
def determine_shapes(self):
if self.has_prev_stage:
num_seg = 2
else:
num_seg = 1
k = list(self._data.keys())[0]
if isfile(self._data[k]['data_file'][:-4] + ".npy"):
case_all_data = np.load(self._data[k]['data_file'][:-4] + ".npy", self.memmap_mode)
else:
case_all_data = np.load(self._data[k]['data_file'])['data']
num_color_channels = case_all_data.shape[0] - 1
data_shape = (self.batch_size, num_color_channels, *self.patch_size)
seg_shape = (self.batch_size, num_seg, *self.patch_size)
return data_shape, seg_shape
def generate_train_batch(self):
selected_keys = np.random.choice(self.list_of_keys, self.batch_size, True, None)
data = np.zeros(self.data_shape, dtype=np.float32)
seg = np.zeros(self.seg_shape, dtype=np.float32)
case_properties = []
for j, i in enumerate(selected_keys):
# oversampling foreground will improve stability of model training, especially if many patches are empty
# (Lung for example)
if self.get_do_oversample(j):
force_fg = True
else:
force_fg = False
if 'properties' in self._data[i].keys():
properties = self._data[i]['properties']
else:
properties = load_pickle(self._data[i]['properties_file'])
case_properties.append(properties)
# cases are stored as npz, but we require unpack_dataset to be run. This will decompress them into npy
# which is much faster to access
if isfile(self._data[i]['data_file'][:-4] + ".npy"):
case_all_data = np.load(self._data[i]['data_file'][:-4] + ".npy", self.memmap_mode)
else:
case_all_data = np.load(self._data[i]['data_file'])['data']
# print(np.unique(case_all_data[-1],return_counts=True))
# If we are doing the cascade then we will also need to load the segmentation of the previous stage and
# concatenate it. Here it will be concatenated to the segmentation because the augmentations need to be
# applied to it in segmentation mode. Later in the data augmentation we move it from the segmentations to
# the last channel of the data
if self.has_prev_stage:
if isfile(self._data[i]['seg_from_prev_stage_file'][:-4] + ".npy"):
segs_from_previous_stage = np.load(self._data[i]['seg_from_prev_stage_file'][:-4] + ".npy",
mmap_mode=self.memmap_mode)[None]
else:
segs_from_previous_stage = np.load(self._data[i]['seg_from_prev_stage_file'])['data'][None]
# we theoretically support several possible previous segmentations from which only one is sampled. But
# in practice this feature was never used so it's always only one segmentation
seg_key = np.random.choice(segs_from_previous_stage.shape[0])
seg_from_previous_stage = segs_from_previous_stage[seg_key:seg_key + 1]
assert all([i == j for i, j in zip(seg_from_previous_stage.shape[1:], case_all_data.shape[1:])]), \
"seg_from_previous_stage does not match the shape of case_all_data: %s vs %s" % \
(str(seg_from_previous_stage.shape[1:]), str(case_all_data.shape[1:]))
else:
seg_from_previous_stage = None
# do you trust me? You better do. Otherwise you'll have to go through this mess and honestly there are
# better things you could do right now
# (above) documentation of the day. Nice. Even myself coming back 1 month later I have no friggin idea
# what's going on. I keep the above documentation just for fun but attempt to make things clearer now
need_to_pad = self.need_to_pad
for d in range(3):
# if case_all_data.shape + need_to_pad is still < patch size we need to pad more! We pad on both sides
# always
if need_to_pad[d] + case_all_data.shape[d + 1] < self.patch_size[d]:
need_to_pad[d] = self.patch_size[d] - case_all_data.shape[d + 1]
# we can now choose the bbox from -need_to_pad // 2 to shape - patch_size + need_to_pad // 2. Here we
# define what the upper and lower bound can be to then sample form them with np.random.randint
shape = case_all_data.shape[1:]
lb_x = - need_to_pad[0] // 2
ub_x = shape[0] + need_to_pad[0] // 2 + need_to_pad[0] % 2 - self.patch_size[0]
lb_y = - need_to_pad[1] // 2
ub_y = shape[1] + need_to_pad[1] // 2 + need_to_pad[1] % 2 - self.patch_size[1]
lb_z = - need_to_pad[2] // 2
ub_z = shape[2] + need_to_pad[2] // 2 + need_to_pad[2] % 2 - self.patch_size[2]
# if not force_fg then we can just sample the bbox randomly from lb and ub. Else we need to make sure we get
# at least one of the foreground classes in the patch
if not force_fg:
bbox_x_lb =
|
np.random.randint(lb_x, ub_x + 1)
|
numpy.random.randint
|
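# --- Illustrative note (not part of the dataset rows above/below) ---
# Hedged sketch of the sampling step at the point where the prompt above is
# truncated: once per-axis bounds are derived from need_to_pad, shape and
# patch_size, the crop origin is drawn uniformly (the +1 makes the upper bound
# inclusive). Standalone helper for illustration only.
import numpy as np

def sample_crop_origin(shape, patch_size, need_to_pad):
    origin = []
    for d in range(3):
        lb = - need_to_pad[d] // 2
        ub = shape[d] + need_to_pad[d] // 2 + need_to_pad[d] % 2 - patch_size[d]
        origin.append(np.random.randint(lb, ub + 1))
    return origin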
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import numpy as np
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, KFold, GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn_lvq import GrmlvqModel, MrslvqModel, LgmlvqModel, LmrslvqModel
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, cohen_kappa_score, confusion_matrix, make_scorer
from imblearn.under_sampling import RandomUnderSampler
import GPy
import GPyOpt
NUM_EVAL_RUNS = 5
NUM_KFOLD_SPLITS = 2
TEST_SIZE = 0.5
scoring_function = f1_score
def bayesopt_logisticregression(X, y):
domain =[{'name': 'C', 'type': 'continuous', 'domain': (0.0, 10.0)}]
def fit_model(x):
x = np.atleast_2d(np.exp(x))
fs = np.zeros((x.shape[0],1))
for i in range(x.shape[0]):
fs[i] = 0
kf = KFold(n_splits=NUM_KFOLD_SPLITS, shuffle=True)
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
model = LogisticRegression(C=x[i, 0])
model.fit(X_train, y_train)
y_test_pred = model.predict(X_test)
score = -1.0 * scoring_function(y_test, y_test_pred)
fs[i] += score
fs[i] *= 1.0 / NUM_KFOLD_SPLITS
return fs
# Create optimizer
opt = GPyOpt.methods.BayesianOptimization(f=fit_model, domain=domain, acquisition_type='EI', acquisition_weight=0.1)
opt.run_optimization(max_iter=20)
params = np.exp(opt.X[np.argmin(opt.Y)])
return {'C': params[0]}
def gridsearch_logisticregression(X, y):
params = {'C':
|
np.logspace(-2, 6, 10)
|
numpy.logspace
|
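# --- Illustrative note (not part of the dataset rows above/below) ---
# Hedged sketch of how the truncated grid-search helper could continue: a
# log-spaced grid over C evaluated with the same f1-based scoring used by the
# Bayesian-optimization variant above. Everything past np.logspace is an
# assumption, not the original source.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import f1_score, make_scorer

def gridsearch_logreg(X, y):
    params = {'C': np.logspace(-2, 6, 10)}
    gs = GridSearchCV(LogisticRegression(), params,
                      scoring=make_scorer(f1_score), cv=2)
    gs.fit(X, y)
    return gs.best_params_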
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 16:41:27 2018
@author: eee
"""
from make_parameter import PreParam
import numpy as np
from pyomo.opt import SolverFactory
from pyomo.opt.parallel import SolverManagerFactory
from pyomo.environ import *
import pickle
from copy import deepcopy, copy
from collections import Counter
import multiprocessing as mp
import time
import cvxpy as cp
from itertools import combinations
from operator import itemgetter
from numpy import linalg as LA
import mosek
class OptLin(PreParam):
def __init__(self, path_casedata, set_disturb, mode):
super(OptLin,self).__init__(path_casedata, set_disturb, mode)
self.optsolver = SolverFactory('ipopt')
self.optsolver.set_options('constr_viol_tol=1e-10')
if mode == 20: self.opt_lin()
elif mode == 21: self.opt_lin_sdp_large()
elif mode == 22: self.opt_lin_sdp_small()
elif mode == 23: self.opt_lin_sdp_small_admm()
elif mode == 24: self.opt_lin_sdp_small_admm_fs()
def opt_lin(self):
'''
solve the dvid model with a centralized method using IPOPT and a linearized power flow model
'''
opt_sm = ConcreteModel()
#===================== variables=========================
# problem variables x_ls
# index of opt variables and initial values of opt variables
index_w = list()
index_theta = list()
w_0 = dict()
theta_0 = dict()
for (k, i) in self.set_k_i:
for j in range(self.param_dd[k[0]][i][2]+1):
# differential variables and algebraic variables
for i_bus in self.i_gen:
index_w.append( (k[0], k[1], i, j, i_bus) )
w_0[(k[0], k[1], i, j, i_bus)] = self.w_0[i_bus-1]
for i_bus in self.i_all:
index_theta.append( (k[0], k[1], i, j, i_bus) )
theta_0[(k[0], k[1], i, j, i_bus)] = self.theta_0[i_bus-1]
opt_sm.m = Var(self.i_gen, initialize = self.m_0)
opt_sm.d = Var(self.i_gen, initialize = self.d_0)
opt_sm.w = Var(index_w, initialize = w_0)
opt_sm.theta = Var(index_theta, initialize = theta_0)
opt_sm.con = ConstraintList()
J_ki = dict()
for (k, i) in self.set_k_i:
########### J_ki ###############
print( k,i)
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, 0)
nc = self.param_dd[k[0]][i][2] # order of disturbance k[0] (disturbances of the same type share the same order)
s_tau= self.param_dd[k[0]][i][3] # collocation points, tau, of disturbance k[0]
h = self.param_dd[k[0]][i][1] - self.param_dd[k[0]][i][0] # length of time element i for disturbance k[0]
J_theta = sum( sum( self.int_l[1, nc, s_tau, j1, j2] * sum( ( opt_sm.theta[(k[0], k[1], i, j1, i_bus_f)] - opt_sm.theta[(k[0], k[1], i, j1, i_bus_t)] ) * ( opt_sm.theta[(k[0], k[1], i, j2, i_bus_f)] - opt_sm.theta[(k[0], k[1], i, j2, i_bus_t)] ) * ratio_B[i_bus_f - 1, i_bus_t - 1] for (i_bus_f, i_bus_t) in self.ind_branch ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_w = sum( sum( self.int_l[1, nc, s_tau, j1, j2] * sum( ( opt_sm.w[(k[0], k[1], i, j1, i_bus)] ) * ( opt_sm.w[(k[0], k[1], i, j2, i_bus)] ) for i_bus in self.i_gen ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_der_w = (1.0/h)**2 * sum( sum( self.int_l[2, nc, s_tau, j1, j2] * sum( ( opt_sm.w[(k[0], k[1], i, j1, i_bus)] ) * ( opt_sm.w[(k[0], k[1], i, j2, i_bus)] ) for i_bus in self.i_gen ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_ce_gen = sum( sum( self.int_l[1, nc, s_tau, j1, j2] * ( sum( opt_sm.d[i_bus]**2 * ( opt_sm.w[(k[0], k[1], i, j1, i_bus)] ) * ( opt_sm.w[(k[0], k[1], i, j2, i_bus)] ) for i_bus in self.i_gen ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) ) \
+ (1.0/h)**2 * sum( sum( self.int_l[2, nc, s_tau, j1, j2] * ( sum( opt_sm.m[i_bus]**2 * ( opt_sm.w[(k[0], k[1], i, j1, i_bus)] ) * ( opt_sm.w[(k[0], k[1], i, j2, i_bus)] ) for i_bus in self.i_gen ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) ) \
+ 2 *(1/h) * sum( sum( self.int_l[3, nc, s_tau, j1, j2] * ( sum( opt_sm.m[i_bus]* opt_sm.d[i_bus] * ( opt_sm.w[(k[0], k[1], i, j1, i_bus)] ) * ( opt_sm.w[(k[0], k[1], i, j2, i_bus)] ) for i_bus in self.i_gen ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_ce_load = (1.0/h)**2 * sum( sum( self.int_l[2, nc, s_tau, j1, j2] * ( sum( (self.D[i_bus-1])**2 * ( opt_sm.theta[(k[0], k[1], i, j1, i_bus)] ) * ( opt_sm.theta[(k[0], k[1], i, j2, i_bus)] ) for i_bus in self.i_load ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_ki[(k,i)] = h * (J_theta + J_w + J_der_w + J_ce_gen + J_ce_load) * self.casedata['disturbance'][k[0]][k[1]-1][-1]
########### constraints of collocation equations ###############
for r in range(1, nc+1): # for each collocation point
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, r)
# der(theta) = w for all generator/inverter buses, i.e., i_gen
for i_bus in self.i_gen:
opt_sm.con.add( sum( opt_sm.theta[(k[0], k[1], i, j, i_bus)] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) == h * opt_sm.w[(k[0], k[1], i, r, i_bus)] )
# der (theta) for load buses.
for i_bus in self.i_load:
opt_sm.con.add( sum( opt_sm.theta[(k[0], k[1], i, j, i_bus)] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) ==
h * (1/self.D[i_bus-1]) * (self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - self.v_0[i_bus-1]**2 * delta_G[i_bus-1] - self.v_0[i_bus-1] * sum(self.B_0[i_bus-1,j_bus-1] * ratio_B[i_bus-1,j_bus-1] * self.v_0[j_bus-1]
* (np.sin( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) + ( (opt_sm.theta[(k[0], k[1], i, r, i_bus)] - opt_sm.theta[(k[0], k[1], i, r, j_bus)]) - (self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1]) ) * np.cos( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) )
for j_bus in self.i_all) ) )
# der w = for all generators
for i_bus in self.i_gen:
opt_sm.con.add( sum( opt_sm.w[(k[0], k[1], i, j, i_bus)] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) == h * (1/ opt_sm.m[i_bus]) * ( - opt_sm.d[i_bus] * opt_sm.w[(k[0], k[1], i, r, i_bus)] + self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - self.v_0[i_bus-1]**2 * delta_G[i_bus-1] - self.v_0[i_bus-1] * sum(self.B_0[i_bus-1,j_bus-1] * ratio_B[i_bus-1,j_bus-1] * self.v_0[j_bus-1]
*( np.sin( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) + ( (opt_sm.theta[(k[0], k[1], i, r, i_bus)] - opt_sm.theta[(k[0], k[1], i, r, j_bus)]) - (self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1]) ) * np.cos( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) )
for j_bus in self.i_all)) )
# 0 = g for non load & generator buses
if i == 1: ii = 1
else: ii=0
for r in range(ii, nc+1): # for each collocation point including r=0
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, r)
for i_bus in self.i_non:
opt_sm.con.add( 0 == self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - self.v_0[i_bus-1]**2 * delta_G[i_bus-1] - self.v_0[i_bus-1] * sum(self.B_0[i_bus-1,j_bus-1] * ratio_B[i_bus-1,j_bus-1] * self.v_0[j_bus-1]
* (np.sin( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) + ( (opt_sm.theta[(k[0], k[1], i, r, i_bus)] - opt_sm.theta[(k[0], k[1], i, r, j_bus)]) - (self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1]) ) * np.cos( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ))
for j_bus in self.i_all) )
########### frequency constraints, resources constraints, and also constraints for m and d ################
for r in range(1, nc+1): # for each collocation point
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, r)
# frequency constraints w_l(t) <= w(t) <= w_u(t) for all generator/inverter buses, i.e., i_gen
for i_bus in self.i_gen:
for key_fb in self.casedata['freq_band'][k[0]]:
if self.param_dd[k[0]][i][0] + h * s_tau[r] > key_fb[0] and self.param_dd[k[0]][i][0] + h * s_tau[r] <= key_fb[1]:
opt_sm.con.add( opt_sm.w[(k[0], k[1], i, r, i_bus)] >= (self.casedata['freq_band'][k[0]][key_fb][0] - 50) * 2 * np.pi )
opt_sm.con.add( opt_sm.w[(k[0], k[1], i, r, i_bus)] <= (self.casedata['freq_band'][k[0]][key_fb][1] - 50) * 2 * np.pi )
break
# branch rotor angle difference constraints
for (i_bus_f, i_bus_t) in self.ind_branch:
if ratio_B[i_bus_f-1, i_bus_t-1] != 0:
opt_sm.con.add( ( opt_sm.theta[(k[0], k[1], i, r, i_bus_f)] - opt_sm.theta[(k[0], k[1], i, r, i_bus_t)] ) <= 135/180*np.pi )
opt_sm.con.add( ( opt_sm.theta[(k[0], k[1], i, r, i_bus_f)] - opt_sm.theta[(k[0], k[1], i, r, i_bus_t)] ) >= -135/180*np.pi )
# resources constraints p_l <= p - m*der(w) - d*w <= p_u for all generators, and also constraints for m and d
for i_bus in self.i_gen:
i_gc = np.where(self.casedata['gencontrol'][:,0]==i_bus)[0]
opt_sm.con.add( self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - (1/h) *opt_sm.m[i_bus] * sum( opt_sm.w[(k[0], k[1], i, j, i_bus)] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) - opt_sm.d[i_bus] * opt_sm.w[(k[0], k[1], i, r, i_bus)] >= self.casedata['gencontrol'][i_gc, 6][0] )
opt_sm.con.add( self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - (1/h) *opt_sm.m[i_bus] * sum( opt_sm.w[(k[0], k[1], i, j, i_bus)] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) - opt_sm.d[i_bus] * opt_sm.w[(k[0], k[1], i, r, i_bus)] <= self.casedata['gencontrol'][i_gc, 7][0] )
for i_bus in self.i_gen:
i_gc = np.where(self.casedata['gencontrol'][:,0]==i_bus)[0]
opt_sm.con.add( opt_sm.m[i_bus] >= self.casedata['gencontrol'][i_gc, 2][0])
opt_sm.con.add( opt_sm.m[i_bus] <= self.casedata['gencontrol'][i_gc, 3][0])
opt_sm.con.add( opt_sm.d[i_bus] >= self.casedata['gencontrol'][i_gc, 4][0])
opt_sm.con.add( opt_sm.d[i_bus] <= self.casedata['gencontrol'][i_gc, 5][0])
# continuity constraints of differential variable profiles across time element boundaries within the subproblem ls, i.e., x[i, 0] = x[i-1, n_c], and also initial value constraints for the first time element.
for (k, i) in self.set_k_i:
if i == 1: # whether time element i is the first time element, if yes, add initial value constraints
for i_bus in self.i_gen:
opt_sm.con.add( opt_sm.w[(k[0], k[1], i, 0, i_bus)] == self.w_0[i_bus-1] )
for i_bus in self.i_gen + self.i_load + self.i_non:
opt_sm.con.add( opt_sm.theta[(k[0], k[1], i, 0, i_bus)] == self.theta_0[i_bus-1] )
elif (k, i - 1) in self.set_k_i: # whether two adjacent time elements are in the subproblem.
nc = self.param_dd[k[0]][i-1][2]
for i_bus in self.i_gen:
opt_sm.con.add( opt_sm.w[(k[0], k[1], i, 0, i_bus)] == opt_sm.w[(k[0], k[1], i-1 , nc, i_bus)] )
for i_bus in self.i_gen + self.i_load:
opt_sm.con.add( opt_sm.theta[(k[0], k[1], i, 0, i_bus)] == opt_sm.theta[(k[0], k[1], i-1, nc, i_bus)] )
###### objective function ###############
J = sum(J_ki[(k,i)] for (k, i) in self.set_k_i)
opt_sm.J = J
opt_sm.obj = Objective( expr = opt_sm.J , sense=minimize)
############# solver ################
solver = SolverFactory('ipopt')
solver.set_options('constr_viol_tol=1e-10')
solver.solve(opt_sm,tee=True)
self.opt_result = deepcopy(opt_sm)
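# --- Illustrative note (hypothetical example, not part of OptLin) ---
# Minimal hedged sketch of the Pyomo/IPOPT pattern used by opt_lin() above:
# build a ConcreteModel with Var/ConstraintList/Objective, then solve it with
# SolverFactory('ipopt').
from pyomo.environ import ConcreteModel, Var, ConstraintList, Objective, minimize
from pyomo.opt import SolverFactory

def tiny_ipopt_example():
    m = ConcreteModel()
    m.x = Var(initialize=1.0)
    m.y = Var(initialize=1.0)
    m.con = ConstraintList()
    m.con.add(m.x + m.y >= 1.0)
    m.obj = Objective(expr=m.x**2 + m.y**2, sense=minimize)
    SolverFactory('ipopt').solve(m, tee=False)
    return m.x.value, m.y.value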
def opt_lin_sdp_large(self):
'''
linearized power flow model and SDP
'''
X = dict()
n_X = dict()
for (k, i) in self.set_k_i:
n_X = 2 * len(self.i_gen) + (self.param_dd[k[0]][i][2]+1) * (self.n_bus + len(self.i_gen)) + 2 * (self.param_dd[k[0]][i][2]+1) * len(self.i_gen)
X[(k[0], k[1], i)] = cp.Variable((1 + n_X, 1 + n_X), symmetric =True)
# index map between X and [m, d, theta, w, lm, ld]
idm = dict() # index map
for (k, i) in self.set_k_i:
idm[(k[0], k[1], i)] = dict()
i_X = 1
for i_bus in self.i_gen:
idm[(k[0], k[1], i)][('m', i_bus)] = i_X
i_X += 1
for i_bus in self.i_gen:
idm[(k[0], k[1], i)][('d', i_bus)] = i_X
i_X += 1
for j in range(self.param_dd[k[0]][i][2]+1):
for i_bus in self.i_all:
idm[(k[0], k[1], i)][('theta', j, i_bus)] = i_X
i_X += 1
for j in range(self.param_dd[k[0]][i][2]+1):
for i_bus in self.i_gen:
idm[(k[0], k[1], i)][('w', j, i_bus)] = i_X
i_X += 1
for j in range(self.param_dd[k[0]][i][2]+1):
for i_bus in self.i_gen:
idm[(k[0], k[1], i)][('lm', j, i_bus)] = i_X
i_X += 1
for j in range(self.param_dd[k[0]][i][2]+1):
for i_bus in self.i_gen:
idm[(k[0], k[1], i)][('ld', j, i_bus)] = i_X
i_X += 1
self.idm = idm
constraints = list()
J_ki = dict()
for (k, i) in self.set_k_i:
########### J_ki ###############
print( k,i)
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, 0)
nc = self.param_dd[k[0]][i][2] # order of disturbance k[0] (disturbances of the same type share the same order)
s_tau= self.param_dd[k[0]][i][3] # collocation points, tau, of disturbance k[0]
h = self.param_dd[k[0]][i][1] - self.param_dd[k[0]][i][0] # length of time element i for disturbance k[0]
J_theta = sum( sum( self.int_l[1, nc, s_tau, j1, j2] * sum(
(
X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('theta', j1, i_bus_f)], idm[(k[0], k[1], i)][('theta', j2, i_bus_f)]]
- X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('theta', j1, i_bus_f)], idm[(k[0], k[1], i)][('theta', j2, i_bus_t)]]
- X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('theta', j1, i_bus_t)], idm[(k[0], k[1], i)][('theta', j2, i_bus_f)]]
+ X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('theta', j1, i_bus_t)], idm[(k[0], k[1], i)][('theta', j2, i_bus_t)]]
) * ratio_B[i_bus_f - 1, i_bus_t - 1] for (i_bus_f, i_bus_t) in self.ind_branch ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_w = sum( sum( self.int_l[1, nc, s_tau, j1, j2] * sum(
X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('w', j1, i_bus)], idm[(k[0], k[1], i)][('w', j2, i_bus)]] for i_bus in self.i_gen ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_der_w = (1.0/h)**2 * sum( sum( self.int_l[2, nc, s_tau, j1, j2] * sum(
X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('w', j1, i_bus)], idm[(k[0], k[1], i)][('w', j2, i_bus)]] for i_bus in self.i_gen ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_ce_gen = sum( sum( self.int_l[1, nc, s_tau, j1, j2] * ( sum( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('ld', j1, i_bus)], idm[(k[0], k[1], i)][('ld', j2, i_bus)]] for i_bus in self.i_gen ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) ) \
+ (1.0/h)**2 * sum( sum( self.int_l[2, nc, s_tau, j1, j2] * ( sum( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('lm', j1, i_bus)], idm[(k[0], k[1], i)][('lm', j2, i_bus)]] for i_bus in self.i_gen ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) ) \
+ 2 *(1/h) * sum( sum( self.int_l[3, nc, s_tau, j1, j2] * ( sum( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('lm', j1, i_bus)], idm[(k[0], k[1], i)][('ld', j2, i_bus)]] for i_bus in self.i_gen ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_ce_load = (1.0/h)**2 * sum( sum( self.int_l[2, nc, s_tau, j1, j2] * ( sum( (self.D[i_bus-1])**2 * X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('theta', j1, i_bus)], idm[(k[0], k[1], i)][('theta', j2, i_bus)]] for i_bus in self.i_load ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_ki[(k,i)] = h * (J_theta + J_w + J_der_w + J_ce_gen + J_ce_load) * self.casedata['disturbance'][k[0]][k[1]-1][-1]
########### constraints of collocation equations ###############
for r in range(1, nc+1): # for each collocation point
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, r)
# der(theta) = w for all generator/inverter buses, i.e., i_gen
for i_bus in self.i_gen:
constraints.append( sum( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', j, i_bus)]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) == h * X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('w', r, i_bus)]] )
# der (theta) for load buses.
for i_bus in self.i_load:
constraints.append( sum( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', j, i_bus)]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) ==
h * (1/self.D[i_bus-1]) * (self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - self.v_0[i_bus-1]**2 * delta_G[i_bus-1] - self.v_0[i_bus-1] * sum(self.B_0[i_bus-1,j_bus-1] * ratio_B[i_bus-1,j_bus-1] * self.v_0[j_bus-1]
* (np.sin( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) + ( ( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', r, i_bus)]] - X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', r, j_bus)]] ) - (self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1]) ) * np.cos( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) ) for j_bus in self.i_all) ) )
# der w = for all generators
for i_bus in self.i_gen:
constraints.append( sum( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('m', i_bus)], idm[(k[0], k[1], i)][('w', j, i_bus)]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) == h * ( - X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('d', i_bus)], idm[(k[0], k[1], i)][('w', r, i_bus)]] + self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - self.v_0[i_bus-1]**2 * delta_G[i_bus-1] - self.v_0[i_bus-1] * sum(self.B_0[i_bus-1,j_bus-1] * ratio_B[i_bus-1,j_bus-1] * self.v_0[j_bus-1]
* ( np.sin( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) + ( ( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', r, i_bus)]] - X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', r, j_bus)]] ) - (self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1]) ) * np.cos( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) ) for j_bus in self.i_all)) )
# 0 = g for non load & generator buses
if i == 1: ii = 1
else: ii=0
for r in range(ii, nc+1): # for each collocation point including r=0
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, r)
for i_bus in self.i_non:
constraints.append( 0 == self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - self.v_0[i_bus-1]**2 * delta_G[i_bus-1] - self.v_0[i_bus-1] * sum(self.B_0[i_bus-1,j_bus-1] * ratio_B[i_bus-1,j_bus-1] * self.v_0[j_bus-1]
* ( np.sin( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) + ( ( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', r, i_bus)]] - X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', r, j_bus)]] ) - (self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1]) ) * np.cos( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) ) for j_bus in self.i_all) )
########### frequency constraints, resources constraints, and also constraints for m and d ################
for r in range(1, nc+1): # for each collocation point
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, r)
# frequency constraints w_l(t) <= w(t) <= w_u(t) for all generator/inverter buses, i.e., i_gen
for i_bus in self.i_gen:
for key_fb in self.casedata['freq_band'][k[0]]:
if self.param_dd[k[0]][i][0] + h * s_tau[r] > key_fb[0] and self.param_dd[k[0]][i][0] + h * s_tau[r] <= key_fb[1]:
constraints.append( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('w', r, i_bus)]] >= (self.casedata['freq_band'][k[0]][key_fb][0] - 50) * 2 * np.pi )
constraints.append( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('w', r, i_bus)]] <= (self.casedata['freq_band'][k[0]][key_fb][1] - 50) * 2 * np.pi )
break
# branch rotor angle difference constraints
for (i_bus_f, i_bus_t) in self.ind_branch:
if ratio_B[i_bus_f-1, i_bus_t-1] != 0:
constraints.append( ( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', r, i_bus_f)]] - X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', r, i_bus_t)]] ) <= 135/180*np.pi )
constraints.append( ( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', r, i_bus_f)]] - X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', r, i_bus_t)]] ) >= -135/180*np.pi )
# resources constraints p_l <= p - m*der(w) - d*w <= p_u for all generators, and also constraints for m and d
for i_bus in self.i_gen:
i_gc = np.where(self.casedata['gencontrol'][:,0]==i_bus)[0]
constraints.append( self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - (1/h) *sum( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('m', i_bus)], idm[(k[0], k[1], i)][('w', j, i_bus)]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) - X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('d', i_bus)], idm[(k[0], k[1], i)][('w', r, i_bus)]] >= self.casedata['gencontrol'][i_gc, 6][0] )
constraints.append( self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - (1/h) *sum( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('m', i_bus)], idm[(k[0], k[1], i)][('w', j, i_bus)]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) - X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('d', i_bus)], idm[(k[0], k[1], i)][('w', r, i_bus)]] <= self.casedata['gencontrol'][i_gc, 7][0] )
for i_bus in self.i_gen:
i_gc = np.where(self.casedata['gencontrol'][:,0]==i_bus)[0]
constraints.append( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('m', i_bus)]] >= self.casedata['gencontrol'][i_gc, 2][0])
constraints.append( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('m', i_bus)]] <= self.casedata['gencontrol'][i_gc, 3][0])
constraints.append( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('d', i_bus)]] >= self.casedata['gencontrol'][i_gc, 4][0])
constraints.append( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('d', i_bus)]] <= self.casedata['gencontrol'][i_gc, 5][0])
# continuity constraints of differential variable profiles across time element boundaries within the subproblem ls, i.e., x[i, 0] = x[i-1, n_c], and also initial value constraints for the first time element.
for (k, i) in self.set_k_i:
if i == 1: # whether time element i is the first time element, if yes, add initial value constraints
for i_bus in self.i_gen:
constraints.append( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('w', 0, i_bus)]] == self.w_0[i_bus-1] )
for i_bus in self.i_gen + self.i_load + self.i_non:
constraints.append( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', 0, i_bus)]] == self.theta_0[i_bus-1] )
elif (k, i - 1) in self.set_k_i: # whether two adjacent time elements are in the subproblem.
nc = self.param_dd[k[0]][i-1][2]
for i_bus in self.i_gen:
constraints.append( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('w', 0, i_bus)]] == X[(k[0], k[1], i-1)][0, idm[(k[0], k[1], i-1)][('w', nc, i_bus)]] )
for i_bus in self.i_gen + self.i_load:
constraints.append( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', 0, i_bus)]] == X[(k[0], k[1], i-1)][0, idm[(k[0], k[1], i-1)][('theta', nc, i_bus)]] )
# constraints for the lifting variables l_m and l_d
for (k, i) in self.set_k_i:
nc = self.param_dd[k[0]][i][2]
for r in range(0, nc+1):
for i_bus in self.i_gen:
constraints.append( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('lm', r, i_bus)]] == X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('m', i_bus)], idm[(k[0], k[1], i)][('w', r, i_bus)]] )
constraints.append( X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('ld', r, i_bus)]] == X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('d', i_bus)], idm[(k[0], k[1], i)][('w', r, i_bus)]] )
# constraints for the first element of matrices X
for (k, i) in self.set_k_i:
constraints.append( X[(k[0], k[1], i)][0, 0] == 1)
# sharing variables in X_[m,d] for different (k,i)
for (k, i) in self.set_k_i[1:]:
constraints.append(X[(k[0], k[1], i)][0:2*len(self.i_gen)+1+1, 0:2*len(self.i_gen)+1+1]
== X[self.set_k_i[0][0][0],self.set_k_i[0][0][1], self.set_k_i[0][1]][0:2*len(self.i_gen)+1+1, 0:2*len(self.i_gen)+1+1])
# semidefinite constraints
for (k, i) in self.set_k_i:
constraints.append( X[(k[0], k[1], i)] >> 0 )
'''
# constraints for tightening the relaxation
for (k, i) in self.set_k_i:
for i_bus in self.i_gen:
for (j1, j2) in list(combinations(range(self.param_dd[k[0]][i][2]+1),2)):
constraints.append( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('lm', j1, i_bus)], idm[(k[0], k[1], i)][('ld', j2, i_bus)]] == X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('lm', j2, i_bus)], idm[(k[0], k[1], i)][('ld', j1, i_bus)]] )
for (k, i) in self.set_k_i:
nc = self.param_dd[k[0]][i][2]
s_tau= self.param_dd[k[0]][i][3]
h = self.param_dd[k[0]][i][1] - self.param_dd[k[0]][i][0]
for r in range(1, nc+1): # for each collocation point
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, r)
# frequency constraints
for i_bus in self.i_gen:
for key_fb in self.casedata['freq_band'][k[0]]:
if self.param_dd[k[0]][i][0] + h * s_tau[r] > key_fb[0] and self.param_dd[k[0]][i][0] + h * s_tau[r] <= key_fb[1]:
constraints.append( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('w', r, i_bus)], idm[(k[0], k[1], i)][('w', r, i_bus)]] <= (self.casedata['freq_band'][k[0]][key_fb][1] - 50)**2 )
break
# branch rotor angle difference constraints
for (i_bus_f, i_bus_t) in self.ind_branch:
if ratio_B[i_bus_f-1, i_bus_t-1] != 0:
constraints.append(
X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('theta', r, i_bus_f)], idm[(k[0], k[1], i)][('theta', r, i_bus_f)]]
- X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('theta', r, i_bus_f)], idm[(k[0], k[1], i)][('theta', r, i_bus_t)]]
- X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('theta', r, i_bus_t)], idm[(k[0], k[1], i)][('theta', r, i_bus_f)]]
+ X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('theta', r, i_bus_t)], idm[(k[0], k[1], i)][('theta', r, i_bus_t)]]
<= (135/180*np.pi)**2 )
for (k, i) in self.set_k_i:
for i_bus in self.i_gen:
i_gc = np.where(self.casedata['gencontrol'][:,0]==i_bus)[0]
constraints.append( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('m', i_bus)], idm[(k[0], k[1], i)][('m', i_bus)]] >= (self.casedata['gencontrol'][i_gc, 2][0])**2 )
constraints.append( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('m', i_bus)], idm[(k[0], k[1], i)][('m', i_bus)]] <= (self.casedata['gencontrol'][i_gc, 3][0])**2 )
constraints.append( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('d', i_bus)], idm[(k[0], k[1], i)][('d', i_bus)]] >= (self.casedata['gencontrol'][i_gc, 4][0])**2 )
constraints.append( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('d', i_bus)], idm[(k[0], k[1], i)][('d', i_bus)]] <= (self.casedata['gencontrol'][i_gc, 5][0])**2 )
# continuity constraints of differential variable profiles across time element boundaries within the subproblem ls, i.e., x[i, 0] = x[i-1, n_c], and also initial value constraints for the first time element.
for (k, i) in self.set_k_i:
if i == 1: # whether time element i is the first time element, if yes, add initial value constraints
for j in range(0, self.param_dd[k[0]][i][2] + 1):
for i_bus in self.i_gen:
constraints.append( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('w', 0, i_bus)], idm[(k[0], k[1], i)][('w', j, i_bus)]] == (self.w_0[i_bus-1]) * X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('w', j, i_bus)]] )
for i_bus in self.i_gen + self.i_load + self.i_non:
constraints.append( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('theta', 0, i_bus)], idm[(k[0], k[1], i)][('theta', j, i_bus)]] == (self.theta_0[i_bus-1]) * X[(k[0], k[1], i)][0, idm[(k[0], k[1], i)][('theta', j, i_bus)]] )
elif (k, i - 1) in self.set_k_i: # whether two adjacent time elements are in the subproblem.
nc = self.param_dd[k[0]][i-1][2]
for i_bus in self.i_gen:
constraints.append( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('w', 0, i_bus)], idm[(k[0], k[1], i)][('w', 0, i_bus)]] == X[(k[0], k[1], i-1)][idm[(k[0], k[1], i-1)][('w', nc, i_bus)], idm[(k[0], k[1], i-1)][('w', nc, i_bus)]] )
for i_bus in self.i_gen + self.i_load:
constraints.append( X[(k[0], k[1], i)][idm[(k[0], k[1], i)][('theta', 0, i_bus)], idm[(k[0], k[1], i)][('theta', 0, i_bus)]] == X[(k[0], k[1], i-1)][idm[(k[0], k[1], i-1)][('theta', nc, i_bus)], idm[(k[0], k[1], i-1)][('theta', nc, i_bus)]] )
'''
###### objective function ###############
J = sum(J_ki[(k,i)] for (k, i) in self.set_k_i)
objective = cp.Minimize(J)
opt_sm = cp.Problem(objective, constraints)
opt_sm.solve(solver = 'MOSEK', verbose = False)
self.opt_result_ln_sdp = deepcopy(opt_sm)
self.opt_result_ln_sdp_X = X
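# --- Illustrative note (hypothetical example, not part of OptLin) ---
# Hedged sketch of the CVXPY SDP pattern used above: a symmetric matrix
# variable constrained to be positive semidefinite (X >> 0) with linear
# constraints on its entries, minimized via cp.Problem(...).solve(); MOSEK is
# one possible solver, as in opt_lin_sdp_large().
import cvxpy as cp

def tiny_sdp_example():
    X = cp.Variable((3, 3), symmetric=True)
    constraints = [X >> 0, X[0, 0] == 1, X[0, 1] + X[1, 2] >= 0.5]
    prob = cp.Problem(cp.Minimize(cp.trace(X)), constraints)
    prob.solve()  # e.g. prob.solve(solver='MOSEK') when a license is available
    return X.value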
def opt_lin_sdp_small(self):
'''
linearized power flow model and SDP
'''
X_lmd = dict()
X_mdw = dict()
X_theta = dict()
for (k, i) in self.set_k_i:
nc = self.param_dd[k[0]][i][2]
for i_bus in self.i_gen:
# X_lmd : [1 lm(nc + 1) ld(nc + 1)]^T [1 lm(nc + 1) ld(nc + 1)]
n_lmd = 1 + 2*(nc + 1)
X_lmd[(k[0], k[1], i, i_bus)] = cp.Variable(( n_lmd, n_lmd), symmetric =True)
for i_bus in self.i_gen:
# X_mdw : [1 m(1) d(1) w(nc + 1)]^T [1 m(1) d(1) w(nc + 1)]
n_mdw = 1 + 2+ (nc + 1)
X_mdw[(k[0], k[1], i, i_bus)] = cp.Variable(( n_mdw, n_mdw), symmetric =True)
for i_clique in self.clique_tree_theta['node'].keys():
# X_theta : [1 ...]
n_theta = 1 + len(self.clique_tree_theta['node'][i_clique]) * (1 + nc)
X_theta[(k[0], k[1], i, i_clique)] = cp.Variable(( n_theta, n_theta), symmetric =True)
ind_lmd = dict()
ind_mdw = dict()
ind_theta = dict()
for (k, i) in self.set_k_i:
nc = self.param_dd[k[0]][i][2]
i_lmd = 1
for j in range(0, nc+1):
ind_lmd[(k[0], k[1], i, j, 'lm')] = i_lmd
i_lmd += 1
for j in range(0, nc+1):
ind_lmd[(k[0], k[1], i, j, 'ld')] = i_lmd
i_lmd += 1
i_mdw = 1
ind_mdw[(k[0], k[1], i, 'm')] = i_mdw
i_mdw += 1
ind_mdw[(k[0], k[1], i, 'd')] = i_mdw
i_mdw += 1
for j in range(0, nc+1):
ind_mdw[(k[0], k[1], i, j, 'w')] = i_mdw
i_mdw += 1
for i_clique in self.clique_tree_theta['node'].keys():
i_theta = 1
for j in range(0, nc+1):
for i_bus in self.clique_tree_theta['node'][i_clique]:
ind_theta[(k[0], k[1], i, i_clique, j, i_bus)] = i_theta
i_theta += 1
constraints = list()
J_ki = dict()
for (k, i) in self.set_k_i:
########### J_ki ###############
print( k,i)
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, 0)
nc = self.param_dd[k[0]][i][2] # order of disturbance k[0] (disturbances of the same type share the same order)
s_tau= self.param_dd[k[0]][i][3] # collocation points, tau, of disturbance k[0]
h = self.param_dd[k[0]][i][1] - self.param_dd[k[0]][i][0] # length of time element i for disturbance k[0]
J_theta = sum( sum( self.int_l[1, nc, s_tau, j1, j2] * sum(
(
X_theta[(k[0], k[1], i, self.bus_clique[i_bus_f])][ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_f], j1, i_bus_f)], ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_f], j2, i_bus_f)]]
- X_theta[(k[0], k[1], i, self.branch_clique[(i_bus_f, i_bus_t)])][ind_theta[(k[0], k[1], i, self.branch_clique[(i_bus_f, i_bus_t)], j1, i_bus_f)], ind_theta[(k[0], k[1], i, self.branch_clique[(i_bus_f, i_bus_t)], j2, i_bus_t)]]
- X_theta[(k[0], k[1], i, self.branch_clique[(i_bus_t, i_bus_f)])][ind_theta[(k[0], k[1], i, self.branch_clique[(i_bus_t, i_bus_f)], j1, i_bus_t)], ind_theta[(k[0], k[1], i, self.branch_clique[(i_bus_t, i_bus_f)], j2, i_bus_f)]]
+ X_theta[(k[0], k[1], i, self.bus_clique[i_bus_t])][ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_t], j1, i_bus_t)], ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_t], j2, i_bus_t)]]
)* ratio_B[i_bus_f - 1, i_bus_t - 1] for (i_bus_f, i_bus_t) in self.ind_branch ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_w = sum( sum( self.int_l[1, nc, s_tau, j1, j2] * sum(
X_mdw[(k[0], k[1], i, i_bus)][ind_mdw[(k[0], k[1], i, j1, 'w')], ind_mdw[(k[0], k[1], i, j2, 'w')]] for i_bus in self.i_gen ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_der_w = (1.0/h)**2 * sum( sum( self.int_l[2, nc, s_tau, j1, j2] * sum(
X_mdw[(k[0], k[1], i, i_bus)][ind_mdw[(k[0], k[1], i, j1, 'w')], ind_mdw[(k[0], k[1], i, j2, 'w')]] for i_bus in self.i_gen ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_ce_gen = sum( sum( self.int_l[1, nc, s_tau, j1, j2] * ( sum( X_lmd[(k[0], k[1], i, i_bus)][ind_lmd[(k[0], k[1], i, j1, 'ld')], ind_lmd[(k[0], k[1], i, j2, 'ld')]] for i_bus in self.i_gen ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) ) \
+ (1.0/h)**2 * sum( sum( self.int_l[2, nc, s_tau, j1, j2] * ( sum( X_lmd[(k[0], k[1], i, i_bus)][ind_lmd[(k[0], k[1], i, j1, 'lm')], ind_lmd[(k[0], k[1], i, j2, 'lm')]] for i_bus in self.i_gen ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) ) \
+ 2 *(1/h) * sum( sum( self.int_l[3, nc, s_tau, j1, j2] * ( sum( X_lmd[(k[0], k[1], i, i_bus)][ind_lmd[(k[0], k[1], i, j1, 'lm')], ind_lmd[(k[0], k[1], i, j2, 'ld')]] for i_bus in self.i_gen ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_ce_load = (1.0/h)**2 * sum( sum( self.int_l[2, nc, s_tau, j1, j2] * ( sum( (self.D[i_bus-1])**2 * X_theta[(k[0], k[1], i, self.bus_clique[i_bus])][ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], j1, i_bus)], ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], j2, i_bus)]] for i_bus in self.i_load ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_ki[(k,i)] = h * (J_theta + J_w + J_der_w + J_ce_gen + J_ce_load) * self.casedata['disturbance'][k[0]][k[1]-1][-1]
########### constraints of collocation equations ###############
for r in range(1, nc+1): # for each collocation point
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, r)
# der(theta) = w for all generator/inverter buses, i.e., i_gen
for i_bus in self.i_gen:
constraints.append( sum( X_theta[(k[0], k[1], i, self.bus_clique[i_bus])][0, ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], j, i_bus)]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) == h * X_mdw[(k[0], k[1], i, i_bus)][0, ind_mdw[(k[0], k[1], i, r, 'w')]] )
# der (theta) for load buses.
for i_bus in self.i_load:
constraints.append( sum( X_theta[(k[0], k[1], i, self.bus_clique[i_bus])][0, ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], j, i_bus)]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) ==
h * (1/self.D[i_bus-1]) * (self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - self.v_0[i_bus-1]**2 * delta_G[i_bus-1] - self.v_0[i_bus-1] * sum(self.B_0[i_bus-1,j_bus-1] * ratio_B[i_bus-1,j_bus-1] * self.v_0[j_bus-1]
* (np.sin( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) + ( ( X_theta[(k[0], k[1], i, self.bus_clique[i_bus])][0, ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], r, i_bus)]] - X_theta[(k[0], k[1], i, self.bus_clique[j_bus])][0, ind_theta[(k[0], k[1], i, self.bus_clique[j_bus], r, j_bus)]] ) - (self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1]) ) * np.cos( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) ) for j_bus in self.i_all) ) )
# der w = for all generators
for i_bus in self.i_gen:
constraints.append( sum( X_mdw[(k[0], k[1], i, i_bus)][ind_mdw[(k[0], k[1], i, 'm')], ind_mdw[(k[0], k[1], i, j, 'w')]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) == h * ( - X_mdw[(k[0], k[1], i, i_bus)][ind_mdw[(k[0], k[1], i, 'd')], ind_mdw[(k[0], k[1], i, r, 'w')]] + self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - self.v_0[i_bus-1]**2 * delta_G[i_bus-1] - self.v_0[i_bus-1] * sum(self.B_0[i_bus-1,j_bus-1] * ratio_B[i_bus-1,j_bus-1] * self.v_0[j_bus-1]
* ( np.sin( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) + ( ( X_theta[(k[0], k[1], i, self.bus_clique[i_bus])][0, ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], r, i_bus)]] - X_theta[(k[0], k[1], i, self.bus_clique[j_bus])][0, ind_theta[(k[0], k[1], i, self.bus_clique[j_bus], r, j_bus)]] ) - (self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1]) ) * np.cos( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) ) for j_bus in self.i_all)) )
# 0 = g for non load & generator buses
if i == 1: ii = 1
else: ii=0
for r in range(ii, nc+1): # for each collocation point including r=0
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, r)
for i_bus in self.i_non:
constraints.append( 0 == self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - self.v_0[i_bus-1]**2 * delta_G[i_bus-1] - self.v_0[i_bus-1] * sum(self.B_0[i_bus-1,j_bus-1] * ratio_B[i_bus-1,j_bus-1] * self.v_0[j_bus-1]
* ( np.sin( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) + ( ( X_theta[(k[0], k[1], i, self.bus_clique[i_bus])][0, ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], r, i_bus)]] - X_theta[(k[0], k[1], i, self.bus_clique[j_bus])][0, ind_theta[(k[0], k[1], i, self.bus_clique[j_bus], r, j_bus)]] ) - (self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1]) ) * np.cos( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) ) for j_bus in self.i_all) )
########### frequency constraints, resources constraints, and also constraints for m and d ################
for r in range(1, nc+1): # for each collocation point
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, r)
# frequency constraints w_l(t) <= w(t) <= w_u(t) for all generator/inverter buses, i.e., i_gen
for i_bus in self.i_gen:
for key_fb in self.casedata['freq_band'][k[0]]:
if self.param_dd[k[0]][i][0] + h * s_tau[r] > key_fb[0] and self.param_dd[k[0]][i][0] + h * s_tau[r] <= key_fb[1]:
constraints.append( X_mdw[(k[0], k[1], i, i_bus)][0, ind_mdw[(k[0], k[1], i, r, 'w')]] >= (self.casedata['freq_band'][k[0]][key_fb][0] - 50) * 2 * np.pi )
constraints.append( X_mdw[(k[0], k[1], i, i_bus)][0, ind_mdw[(k[0], k[1], i, r, 'w')]] <= (self.casedata['freq_band'][k[0]][key_fb][1] - 50) * 2 * np.pi )
break
# branch rotor angle difference constraints
for (i_bus_f, i_bus_t) in self.ind_branch:
if ratio_B[i_bus_f-1, i_bus_t-1] != 0:
constraints.append( ( X_theta[(k[0], k[1], i, self.bus_clique[i_bus_f])][0, ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_f], r, i_bus_f)]] - X_theta[(k[0], k[1], i, self.bus_clique[i_bus_t])][0, ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_t], r, i_bus_t)]] ) <= 135/180*np.pi )
constraints.append( ( X_theta[(k[0], k[1], i, self.bus_clique[i_bus_f])][0, ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_f], r, i_bus_f)]] - X_theta[(k[0], k[1], i, self.bus_clique[i_bus_t])][0, ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_t], r, i_bus_t)]] ) >= -135/180*np.pi )
# resources constraints p_l <= p - m*der(w) - d*w <= p_u for all generators, and also constraints for m and d
for i_bus in self.i_gen:
i_gc = np.where(self.casedata['gencontrol'][:,0]==i_bus)[0]
constraints.append( self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - (1/h) * sum( X_mdw[(k[0], k[1], i, i_bus)][ind_mdw[(k[0], k[1], i, 'm')], ind_mdw[(k[0], k[1], i, j, 'w')]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) - X_mdw[(k[0], k[1], i, i_bus)][ind_mdw[(k[0], k[1], i, 'd')], ind_mdw[(k[0], k[1], i, r, 'w')]] >= self.casedata['gencontrol'][i_gc, 6][0] )
constraints.append( self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - (1/h) *sum( X_mdw[(k[0], k[1], i, i_bus)][ind_mdw[(k[0], k[1], i, 'm')], ind_mdw[(k[0], k[1], i, j, 'w')]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) - X_mdw[(k[0], k[1], i, i_bus)][ind_mdw[(k[0], k[1], i, 'd')], ind_mdw[(k[0], k[1], i, r, 'w')]] <= self.casedata['gencontrol'][i_gc, 7][0] )
for i_bus in self.i_gen:
i_gc = np.where(self.casedata['gencontrol'][:,0]==i_bus)[0]
constraints.append( X_mdw[(k[0], k[1], i, i_bus)][0, ind_mdw[(k[0], k[1], i, 'm')]] >= self.casedata['gencontrol'][i_gc, 2][0])
constraints.append( X_mdw[(k[0], k[1], i, i_bus)][0, ind_mdw[(k[0], k[1], i, 'm')]] <= self.casedata['gencontrol'][i_gc, 3][0])
constraints.append( X_mdw[(k[0], k[1], i, i_bus)][0, ind_mdw[(k[0], k[1], i, 'd')]] >= self.casedata['gencontrol'][i_gc, 4][0])
constraints.append( X_mdw[(k[0], k[1], i, i_bus)][0, ind_mdw[(k[0], k[1], i, 'd')]] <= self.casedata['gencontrol'][i_gc, 5][0])
# continuity constraints of differential variable profiles across time element boundaries within the subproblem, i.e., x[i, 0] = x[i-1, n_c], and also initial value constraints for the first time element.
for (k, i) in self.set_k_i:
if i == 1: # whether time element i is the first time element, if yes, add initial value constraints
for i_bus in self.i_gen:
constraints.append( X_mdw[(k[0], k[1], i, i_bus)][[0, ind_mdw[(k[0], k[1], i, 0, 'w')]],:][:,[0, ind_mdw[(k[0], k[1], i, 0, 'w')]]] == np.array([[1, self.w_0[i_bus-1]]]).T.dot(np.array([[1, self.w_0[i_bus-1]]])))
for i_bus in self.i_gen + self.i_load + self.i_non:
constraints.append( X_theta[(k[0], k[1], i, self.bus_clique[i_bus])][[0, ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], 0, i_bus)]],:][:,[0, ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], 0, i_bus)]]] == np.array([[1, self.theta_0[i_bus-1] ]]).T.dot(np.array([[1, self.theta_0[i_bus-1] ]]) ) )
elif (k, i - 1) in self.set_k_i: # whether two adjacent time elements are in the subproblem.
nc = self.param_dd[k[0]][i-1][2]
for i_bus in self.i_gen:
constraints.append( X_mdw[(k[0], k[1], i, i_bus)][[0, ind_mdw[(k[0], k[1], i, 0, 'w')]],:][:,[0, ind_mdw[(k[0], k[1], i, 0, 'w')]]] == X_mdw[(k[0], k[1], i-1, i_bus)][[0, ind_mdw[(k[0], k[1], i-1, nc, 'w')]],:][:,[0, ind_mdw[(k[0], k[1], i-1, nc, 'w')]]] )
for i_bus in self.i_gen + self.i_load:
constraints.append( X_theta[(k[0], k[1], i, self.bus_clique[i_bus])][[0, ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], 0, i_bus)]],:][:,[0, ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], 0, i_bus)]]] == X_theta[(k[0], k[1], i-1, self.bus_clique[i_bus])][[0, ind_theta[(k[0], k[1], i-1, self.bus_clique[i_bus], nc, i_bus)]],:][:,[0, ind_theta[(k[0], k[1], i-1, self.bus_clique[i_bus], nc, i_bus)]]] )
# constraints for the lifting variables l_m and l_d
for (k, i) in self.set_k_i:
nc = self.param_dd[k[0]][i][2]
for r in range(0, nc+1):
for i_bus in self.i_gen:
constraints.append( X_lmd[(k[0], k[1], i, i_bus)][0, ind_lmd[(k[0], k[1], i, r, 'lm')]] == X_mdw[(k[0], k[1], i, i_bus)][ind_mdw[(k[0], k[1], i, 'm')], ind_mdw[(k[0], k[1], i, r, 'w')]] )
constraints.append( X_lmd[(k[0], k[1], i, i_bus)][0, ind_lmd[(k[0], k[1], i, r, 'ld')]] == X_mdw[(k[0], k[1], i, i_bus)][ind_mdw[(k[0], k[1], i, 'd')], ind_mdw[(k[0], k[1], i, r, 'w')]] )
# constraints for the first element of matrices X
for (k, i) in self.set_k_i:
for i_bus in self.i_gen:
constraints.append(X_lmd[(k[0], k[1], i, i_bus)][0, 0] == 1)
constraints.append(X_mdw[(k[0], k[1], i, i_bus)][0, 0] == 1)
for i_clique in self.clique_tree_theta['node'].keys():
constraints.append( X_theta[(k[0], k[1], i, i_clique)][0, 0] == 1 )
# sharing variables in X_mdw for different (k,i)
for (k, i) in self.set_k_i[1:]:
root_k0, root_k1, root_i = self.set_k_i[0][0][0], self.set_k_i[0][0][1], self.set_k_i[0][1]
for i_bus in self.i_gen:
constraints.append( X_mdw[(k[0], k[1], i, i_bus)][[0, ind_mdw[(k[0], k[1], i, 'm')], ind_mdw[(k[0], k[1], i, 'd')]], :][:, [0, ind_mdw[(k[0], k[1], i, 'm')], ind_mdw[(k[0], k[1], i, 'd')]]] ==
X_mdw[(root_k0, root_k1, root_i, i_bus)][[0, ind_mdw[(root_k0, root_k1, root_i, 'm')], ind_mdw[(root_k0, root_k1, root_i, 'd')]], :][:, [0, ind_mdw[(root_k0, root_k1, root_i, 'm')], ind_mdw[(root_k0, root_k1, root_i, 'd')]]] )
# sharing variables in X_theta for different cliques.
for (k, i) in self.set_k_i:
nc = self.param_dd[k[0]][i][2]
for edge_clique in self.clique_tree_theta['edge']:
share_bus = list(set(self.clique_tree_theta['node'][edge_clique[0]] ).intersection(set(self.clique_tree_theta['node'][edge_clique[1]])))
share_index_0, share_index_1 = [0], [0]
for j in range(0, nc+1):
for i_bus in share_bus:
share_index_0.append(ind_theta[(k[0], k[1], i, edge_clique[0], j, i_bus)])
share_index_1.append(ind_theta[(k[0], k[1], i, edge_clique[1], j, i_bus)])
constraints.append( X_theta[(k[0], k[1], i, edge_clique[0])][share_index_0, :][:,share_index_0] == X_theta[(k[0], k[1], i, edge_clique[1])][share_index_1, :][:,share_index_1] )
# semidefinite constraints
for (k, i) in self.set_k_i:
for i_bus in self.i_gen:
constraints.append( X_lmd[(k[0], k[1], i, i_bus)] >> 0 )
constraints.append( X_mdw[(k[0], k[1], i, i_bus)] >> 0 )
for i_clique in self.clique_tree_theta['node'].keys():
constraints.append( X_theta[(k[0], k[1], i, i_clique)] >> 0 )
###### objective function ###############
J = sum(J_ki[(k,i)] for (k, i) in self.set_k_i)
objective = cp.Minimize(J)
opt_sm = cp.Problem(objective, constraints)
opt_sm.solve(solver = 'MOSEK', verbose = True, mosek_params={mosek.iparam.num_threads: 1})
self.opt_result = deepcopy(opt_sm)
self.opt_result_X_lmd = dict()
self.opt_result_X_mdw = dict()
self.opt_result_X_theta = dict()
for (k, i) in self.set_k_i:
for i_bus in self.i_gen:
self.opt_result_X_lmd[(k[0], k[1], i, i_bus)] = X_lmd[(k[0], k[1], i, i_bus)]
self.opt_result_X_mdw[(k[0], k[1], i, i_bus)] = X_mdw[(k[0], k[1], i, i_bus)]
for i_clique in self.clique_tree_theta['node'].keys():
self.opt_result_X_theta[(k[0], k[1], i, i_clique)] = X_theta[(k[0], k[1], i, i_clique)]
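# Aside (not from the original class): the PSD matrices X above relax rank-1 outer
# products [1, x]^T [1, x]. A minimal sketch of one common way to recover an
# approximate vector x from a solved lifted matrix is to take its leading
# eigenvector and rescale it so the first entry is 1; the helper below is an
# illustrative assumption, not the author's recovery procedure.
import numpy as np

def recover_rank1_solution(X_val):
    """Approximate x such that X_val ~ [1, x]^T [1, x]."""
    eigvals, eigvecs = np.linalg.eigh(X_val)              # ascending eigenvalues
    v = eigvecs[:, -1] * np.sqrt(max(eigvals[-1], 0.0))   # leading rank-1 factor
    if abs(v[0]) > 1e-12:
        v = v / v[0]                                      # normalize leading entry to 1
    return v[1:]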
#=============================The following is the block for function "opt_lin_sdp_small_admm" and its subfunctions================================#
def opt_lin_sdp_small_admm(self, rho = 1, sep_mode = 'i', N_s = 3, N_s_k = 1, N_s_i = 3):
# parameters of admm
self.rho = rho
self.sep_mode = sep_mode
self.N_s = N_s
self.N_s_k = N_s_k
self.N_s_i = N_s_i
self.rho_tau = 2#1.2
self.rho_mu = 10
self.epsilon_abs = 1e-5
self.epsilon_rel = 1e-3
# separate the problem
self.seperation_compute()
self.make_select_array()
# define z and lambda
self.define_z_lambda()
# initialize all the optimization models for x, get self.opt_x = dict() with the opt model for each s \in P
self.opt_x_init()
# iteration, including modify model, solve opt_x, update z and lambda
for self.kappa in range(1):
self.opt_x_modify()
self.opt_x_solve()
self.update_z()
self.update_lambda()
self.termination()
# self.update_rho()
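# Aside (illustrative, not the class implementation): self.epsilon_abs and
# self.epsilon_rel suggest the standard ADMM stopping test of Boyd et al., where
# iteration stops once the primal and dual residual norms fall below tolerances
# built from those two constants. A minimal sketch on flattened arrays; the
# argument names are assumptions, not attributes of this class.
import numpy as np

def admm_converged(x_k, z_k, z_prev, lam, rho, eps_abs=1e-5, eps_rel=1e-3):
    n = x_k.size
    r = x_k - z_k                          # primal residual
    s = rho * (z_k - z_prev)               # dual residual
    eps_pri = np.sqrt(n) * eps_abs + eps_rel * max(np.linalg.norm(x_k), np.linalg.norm(z_k))
    eps_dual = np.sqrt(n) * eps_abs + eps_rel * np.linalg.norm(lam)
    return np.linalg.norm(r) <= eps_pri and np.linalg.norm(s) <= eps_dual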
def define_z_lambda(self):
'''
define z and lambda and initialize
'''
# define global variables z
self.z_md = dict() # R^{3 x 3}
for i_bus in self.i_gen: self.z_md[i_bus] = np.array([[1, self.m_0[i_bus], self.d_0[i_bus]]]).T.dot( np.array([[1, self.m_0[i_bus], self.d_0[i_bus]]]) )
# self.z_w = dict() R^{2 x 2}
self.z_w = dict(zip([(ki[0][0], ki[0][1], ki[1], i_bus) for ki in self.M_lower_union for i_bus in self.i_gen], [np.array([[1, self.w_0[i_bus - 1] ]]).T.dot( np.array([[1, self.w_0[i_bus - 1] ]]) ) for ki in self.M_lower_union for i_bus in self.i_gen] ))
# self.z_theta = dict() R^{ 1 + number of gen_load buses in clique i_clique}
self.z_theta = dict(zip([(ki[0][0], ki[0][1], ki[1], i_clique) for ki in self.M_lower_union for i_clique in self.clique_tree_theta['node_gl'].keys()],
[np.array( [[1] + self.theta_0[(np.array(self.clique_tree_theta['node_gl'][i_clique]) - 1).tolist()].tolist() ] ).T.dot( np.array( [[1] + self.theta_0[(np.array(self.clique_tree_theta['node_gl'][i_clique]) - 1).tolist()].tolist() ] ) ) for ki in self.M_lower_union for i_clique in self.clique_tree_theta['node_gl'].keys() ] ))
# define Lambda for each s \in P
self.Lambda = dict()
for s in self.P:
self.Lambda[s] = dict()
self.Lambda[s]['md'] = dict()
for i_bus in self.i_gen: self.Lambda[s]['md'][i_bus] = np.zeros((3,3))
self.Lambda[s]['w'] = dict(zip([(ki[0][0], ki[0][1], ki[1], i_bus) for ki in self.M_lower[s] + self.M_upper[s] for i_bus in self.i_gen], [ np.zeros((2,2)) for ki in self.M_lower[s] + self.M_upper[s] for i_bus in self.i_gen] ))
self.Lambda[s]['theta'] = dict(zip([(ki[0][0], ki[0][1], ki[1], i_clique) for ki in self.M_lower[s] + self.M_upper[s] for i_clique in self.clique_tree_theta['node_gl'].keys()], [np.zeros(( 1 + len(self.clique_tree_theta['node_gl'][i_clique]) , 1 + len(self.clique_tree_theta['node_gl'][i_clique]) )) for ki in self.M_lower[s] + self.M_upper[s] for i_clique in self.clique_tree_theta['node_gl'].keys() ] ))
def _opt_xs_init(self, s):
'''
initialize the optimization model for x_s, for a given s
'''
set_k_i_s = self.Xi[s] # set_k_i for the subproblem s
# optimization variables
for (k, i) in set_k_i_s:
nc = self.param_dd[k[0]][i][2]
for i_bus in self.i_gen:
# X_lmd : [1 lm(nc + 1) ld(nc + 1)]^T [1 lm(nc + 1) ld(nc + 1)]
n_lmd = 1 + 2*(nc + 1)
self.X_lmd[(k[0], k[1], i, i_bus)] = cp.Variable(( n_lmd, n_lmd), symmetric =True)
self.epsilon_n = self.epsilon_n + n_lmd**2
for i_bus in self.i_gen:
# X_mdw : [1 m(1) d(1) w(nc + 1)]^T [1 m(1) d(1) w(nc + 1)]
n_mdw = 1 + 2+ (nc + 1)
self.X_mdw[(k[0], k[1], i, i_bus)] = cp.Variable(( n_mdw, n_mdw), symmetric =True)
self.epsilon_n = self.epsilon_n + n_mdw**2
for i_clique in self.clique_tree_theta['node'].keys():
# X_theta : [1 ...]
n_theta = 1 + len(self.clique_tree_theta['node'][i_clique]) * (1 + nc)
self.X_theta[(k[0], k[1], i, i_clique)] = cp.Variable(( n_theta, n_theta), symmetric =True)
self.epsilon_n = self.epsilon_n + n_theta**2
self.phi[s] =dict()
self.phi[s]['md'] = dict()
self.phi[s]['w'] = dict()
self.phi[s]['theta'] = dict()
for i_bus in self.i_gen: self.phi[s]['md'][i_bus] = cp.Variable()
for ki in self.M_lower[s]:
for i_bus in self.i_gen:
self.phi[s]['w'][(ki[0][0], ki[0][1], ki[1], i_bus)] = cp.Variable()
for ki in self.M_upper[s]:
for i_bus in self.i_gen:
self.phi[s]['w'][(ki[0][0], ki[0][1], ki[1], i_bus)] = cp.Variable()
for ki in self.M_lower[s]:
for i_clique in self.clique_tree_theta['node_gl'].keys():
self.phi[s]['theta'][(ki[0][0], ki[0][1], ki[1], i_clique)] = cp.Variable()
for ki in self.M_upper[s]:
for i_clique in self.clique_tree_theta['node_gl'].keys():
self.phi[s]['theta'][(ki[0][0], ki[0][1], ki[1], i_clique)] = cp.Variable()
# index map
for (k, i) in set_k_i_s:
nc = self.param_dd[k[0]][i][2]
i_lmd = 1
for j in range(0, nc+1):
self.ind_lmd[(k[0], k[1], i, j, 'lm')] = i_lmd
i_lmd += 1
for j in range(0, nc+1):
self.ind_lmd[(k[0], k[1], i, j, 'ld')] = i_lmd
i_lmd += 1
i_mdw = 1
self.ind_mdw[(k[0], k[1], i, 'm')] = i_mdw
i_mdw += 1
self.ind_mdw[(k[0], k[1], i, 'd')] = i_mdw
i_mdw += 1
for j in range(0, nc+1):
self.ind_mdw[(k[0], k[1], i, j, 'w')] = i_mdw
i_mdw += 1
for i_clique in self.clique_tree_theta['node'].keys():
i_theta = 1
for j in range(0, nc+1):
for i_bus in self.clique_tree_theta['node'][i_clique]:
self.ind_theta[(k[0], k[1], i, i_clique, j, i_bus)] = i_theta
i_theta += 1
# parameters
self.P_Lambda[s] = dict()
self.P_Lambda[s]['md'] = dict()
for i_bus in self.i_gen: self.P_Lambda[s]['md'][i_bus] = cp.Parameter((3,3), symmetric=True)
self.P_Lambda[s]['w'] = dict(zip([(ki[0][0], ki[0][1], ki[1], i_bus) for ki in self.M_lower[s] + self.M_upper[s] for i_bus in self.i_gen], [ cp.Parameter((2,2), symmetric=True) for ki in self.M_lower[s] + self.M_upper[s] for i_bus in self.i_gen] ))
self.P_Lambda[s]['theta'] = dict(zip([(ki[0][0], ki[0][1], ki[1], i_clique) for ki in self.M_lower[s] + self.M_upper[s] for i_clique in self.clique_tree_theta['node_gl'].keys()], [cp.Parameter(( 1 + len(self.clique_tree_theta['node_gl'][i_clique]) , 1 + len(self.clique_tree_theta['node_gl'][i_clique]) ), symmetric=True) for ki in self.M_lower[s] + self.M_upper[s] for i_clique in self.clique_tree_theta['node_gl'].keys() ] ))
# optimization model
constraints = list()
for (k, i) in set_k_i_s:
########### J_ki ###############
print( k,i)
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, 0)
nc = self.param_dd[k[0]][i][2] # order of disturbance k[0] (disturbances of the same type share the same
s_tau= self.param_dd[k[0]][i][3] # collocation points, tau, of disturbance k[0])
h = self.param_dd[k[0]][i][1] - self.param_dd[k[0]][i][0] # length of time element i for disturbance k[0]
J_theta = sum( sum( self.int_l[1, nc, s_tau, j1, j2] * sum(
(
self.X_theta[(k[0], k[1], i, self.bus_clique[i_bus_f])][self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_f], j1, i_bus_f)], self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_f], j2, i_bus_f)]]
- self.X_theta[(k[0], k[1], i, self.branch_clique[(i_bus_f, i_bus_t)])][self.ind_theta[(k[0], k[1], i, self.branch_clique[(i_bus_f, i_bus_t)], j1, i_bus_f)], self.ind_theta[(k[0], k[1], i, self.branch_clique[(i_bus_f, i_bus_t)], j2, i_bus_t)]]
- self.X_theta[(k[0], k[1], i, self.branch_clique[(i_bus_t, i_bus_f)])][self.ind_theta[(k[0], k[1], i, self.branch_clique[(i_bus_t, i_bus_f)], j1, i_bus_t)], self.ind_theta[(k[0], k[1], i, self.branch_clique[(i_bus_t, i_bus_f)], j2, i_bus_f)]]
+ self.X_theta[(k[0], k[1], i, self.bus_clique[i_bus_t])][self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_t], j1, i_bus_t)], self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_t], j2, i_bus_t)]]
)* ratio_B[i_bus_f - 1, i_bus_t - 1] for (i_bus_f, i_bus_t) in self.ind_branch ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_w = sum( sum( self.int_l[1, nc, s_tau, j1, j2] * sum(
self.X_mdw[(k[0], k[1], i, i_bus)][self.ind_mdw[(k[0], k[1], i, j1, 'w')], self.ind_mdw[(k[0], k[1], i, j2, 'w')]] for i_bus in self.i_gen ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_der_w = (1.0/h)**2 * sum( sum( self.int_l[2, nc, s_tau, j1, j2] * sum(
self.X_mdw[(k[0], k[1], i, i_bus)][self.ind_mdw[(k[0], k[1], i, j1, 'w')], self.ind_mdw[(k[0], k[1], i, j2, 'w')]] for i_bus in self.i_gen ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_ce_gen = sum( sum( self.int_l[1, nc, s_tau, j1, j2] * ( sum( self.X_lmd[(k[0], k[1], i, i_bus)][self.ind_lmd[(k[0], k[1], i, j1, 'ld')], self.ind_lmd[(k[0], k[1], i, j2, 'ld')]] for i_bus in self.i_gen ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) ) \
+ (1.0/h)**2 * sum( sum( self.int_l[2, nc, s_tau, j1, j2] * ( sum( self.X_lmd[(k[0], k[1], i, i_bus)][self.ind_lmd[(k[0], k[1], i, j1, 'lm')], self.ind_lmd[(k[0], k[1], i, j2, 'lm')]] for i_bus in self.i_gen ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) ) \
+ 2 *(1/h) * sum( sum( self.int_l[3, nc, s_tau, j1, j2] * ( sum( self.X_lmd[(k[0], k[1], i, i_bus)][self.ind_lmd[(k[0], k[1], i, j1, 'lm')], self.ind_lmd[(k[0], k[1], i, j2, 'ld')]] for i_bus in self.i_gen ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
J_ce_load = (1.0/h)**2 * sum( sum( self.int_l[2, nc, s_tau, j1, j2] * ( sum( (self.D[i_bus-1])**2 * self.X_theta[(k[0], k[1], i, self.bus_clique[i_bus])][self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], j1, i_bus)], self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], j2, i_bus)]] for i_bus in self.i_load ) ) for j2 in range(0, nc+1) ) for j1 in range(0, nc+1) )
self.J_ki[(k,i)] = h * (J_theta + J_w + J_der_w + J_ce_gen + J_ce_load) * self.casedata['disturbance'][k[0]][k[1]-1][-1]
# ( 127.44357370467415 /21.00778069974123 ) 1
# ( 191.14407395083 /13.900200695381915) 2
# ( 74.95065908260192 /26.829236366725194) 3
# ( 46.104854448839994 /8.607371093162719 ) 4
# ( 83.1014925561 /11.757517973459112) 5
# ( 57.599196728337276 /5.9194825682193475) 6
########### constraints of collocation equations ###############
for r in range(1, nc+1): # for each collocation point
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, r)
# der(theta) = w for all generator/inverter buses, i.e., i_gen
for i_bus in self.i_gen:
constraints.append( sum( self.X_theta[(k[0], k[1], i, self.bus_clique[i_bus])][0, self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], j, i_bus)]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) == h * self.X_mdw[(k[0], k[1], i, i_bus)][0, self.ind_mdw[(k[0], k[1], i, r, 'w')]] )
# der (theta) for load buses.
for i_bus in self.i_load:
constraints.append( sum( self.X_theta[(k[0], k[1], i, self.bus_clique[i_bus])][0, self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], j, i_bus)]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) ==
h * (1/self.D[i_bus-1]) * (self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - self.v_0[i_bus-1]**2 * delta_G[i_bus-1] - self.v_0[i_bus-1] * sum(self.B_0[i_bus-1,j_bus-1] * ratio_B[i_bus-1,j_bus-1] * self.v_0[j_bus-1]
* (np.sin( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) + ( ( self.X_theta[(k[0], k[1], i, self.bus_clique[i_bus])][0, self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], r, i_bus)]] - self.X_theta[(k[0], k[1], i, self.bus_clique[j_bus])][0, self.ind_theta[(k[0], k[1], i, self.bus_clique[j_bus], r, j_bus)]] ) - (self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1]) ) * np.cos( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) ) for j_bus in self.i_all) ) )
# der(w) for all generators
for i_bus in self.i_gen:
constraints.append( sum( self.X_mdw[(k[0], k[1], i, i_bus)][self.ind_mdw[(k[0], k[1], i, 'm')], self.ind_mdw[(k[0], k[1], i, j, 'w')]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) == h * ( - self.X_mdw[(k[0], k[1], i, i_bus)][self.ind_mdw[(k[0], k[1], i, 'd')], self.ind_mdw[(k[0], k[1], i, r, 'w')]] + self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - self.v_0[i_bus-1]**2 * delta_G[i_bus-1] - self.v_0[i_bus-1] * sum(self.B_0[i_bus-1,j_bus-1] * ratio_B[i_bus-1,j_bus-1] * self.v_0[j_bus-1]
* ( np.sin( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) + ( ( self.X_theta[(k[0], k[1], i, self.bus_clique[i_bus])][0, self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], r, i_bus)]] - self.X_theta[(k[0], k[1], i, self.bus_clique[j_bus])][0, self.ind_theta[(k[0], k[1], i, self.bus_clique[j_bus], r, j_bus)]] ) - (self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1]) ) * np.cos( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) ) for j_bus in self.i_all)) )
# 0 = g for non load & generator buses
if i == 1: ii = 1
else: ii=0
for r in range(ii, nc+1): # for each collocation point including r=0
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, r)
for i_bus in self.i_non:
constraints.append( 0 == self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - self.v_0[i_bus-1]**2 * delta_G[i_bus-1] - self.v_0[i_bus-1] * sum(self.B_0[i_bus-1,j_bus-1] * ratio_B[i_bus-1,j_bus-1] * self.v_0[j_bus-1]
* ( np.sin( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) + ( ( self.X_theta[(k[0], k[1], i, self.bus_clique[i_bus])][0, self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus], r, i_bus)]] - self.X_theta[(k[0], k[1], i, self.bus_clique[j_bus])][0, self.ind_theta[(k[0], k[1], i, self.bus_clique[j_bus], r, j_bus)]] ) - (self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1]) ) * np.cos( self.theta_0[i_bus - 1] - self.theta_0[j_bus - 1] ) ) for j_bus in self.i_all) )
########### frequency constraints, resource constraints, and also constraints for m and d ################
'''
for r in range(1, nc+1): # for each collocation point
[ratio_pg, ratio_pl, delta_G, ratio_B] = self.array_disturbance(k, i, r)
# frequency constraints w_l(t) <= w(t) <= w_u(t) for all generator/inverter buses, i.e., i_gen
for i_bus in self.i_gen:
for key_fb in self.casedata['freq_band'][k[0]]:
if self.param_dd[k[0]][i][0] + h * s_tau[r] > key_fb[0] and self.param_dd[k[0]][i][0] + h * s_tau[r] <= key_fb[1]:
constraints.append( self.X_mdw[(k[0], k[1], i, i_bus)][0, self.ind_mdw[(k[0], k[1], i, r, 'w')]] >= (self.casedata['freq_band'][k[0]][key_fb][0] - 50) * 2 * np.pi )
constraints.append( self.X_mdw[(k[0], k[1], i, i_bus)][0, self.ind_mdw[(k[0], k[1], i, r, 'w')]] <= (self.casedata['freq_band'][k[0]][key_fb][1] - 50) * 2 * np.pi )
break
# branch rotor angle difference constraints
for (i_bus_f, i_bus_t) in self.ind_branch:
if ratio_B[i_bus_f-1, i_bus_t-1] != 0:
constraints.append( ( self.X_theta[(k[0], k[1], i, self.bus_clique[i_bus_f])][0, self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_f], r, i_bus_f)]] - self.X_theta[(k[0], k[1], i, self.bus_clique[i_bus_t])][0, self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_t], r, i_bus_t)]] ) <= 135/180*np.pi )
constraints.append( ( self.X_theta[(k[0], k[1], i, self.bus_clique[i_bus_f])][0, self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_f], r, i_bus_f)]] - self.X_theta[(k[0], k[1], i, self.bus_clique[i_bus_t])][0, self.ind_theta[(k[0], k[1], i, self.bus_clique[i_bus_t], r, i_bus_t)]] ) >= -135/180*np.pi )
# resource constraints p_l <= p - m*der(w) - d*w <= p_u for all generators, and also constraints for m and d
for i_bus in self.i_gen:
i_gc = np.where(self.casedata['gencontrol'][:,0]==i_bus)[0]
constraints.append( self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - (1/h) *sum( self.X_mdw[(k[0], k[1], i, i_bus)][self.ind_mdw[(k[0], k[1], i, 'm')], self.ind_mdw[(k[0], k[1], i, j, 'w')]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) - self.X_mdw[(k[0], k[1], i, i_bus)][self.ind_mdw[(k[0], k[1], i, 'd')], self.ind_mdw[(k[0], k[1], i, r, 'w')]] >= self.casedata['gencontrol'][i_gc, 6][0] )
constraints.append( self.pg_0[i_bus-1] * ratio_pg[i_bus-1] - self.pl_0[i_bus-1] * ratio_pl[i_bus-1] - (1/h) *sum( self.X_mdw[(k[0], k[1], i, i_bus)][self.ind_mdw[(k[0], k[1], i, 'm')], self.ind_mdw[(k[0], k[1], i, j, 'w')]] * self.der_l[nc, s_tau, j, r] for j in range(0, nc+1) ) - self.X_mdw[(k[0], k[1], i, i_bus)][self.ind_mdw[(k[0], k[1], i, 'd')], self.ind_mdw[(k[0], k[1], i, r, 'w')]] <= self.casedata['gencontrol'][i_gc, 7][0] )
'''
for i_bus in self.i_gen:
i_gc = np.where(self.casedata['gencontrol'][:,0]==i_bus)[0]
constraints.append( self.X_mdw[(k[0], k[1], i, i_bus)][0, self.ind_mdw[(k[0], k[1], i, 'm')]] >= self.casedata['gencontrol'][i_gc, 2][0])
constraints.append( self.X_mdw[(k[0], k[1], i, i_bus)][0, self.ind_mdw[(k[0], k[1], i, 'm')]] <= self.casedata['gencontrol'][i_gc, 3][0])
constraints.append( self.X_mdw[(k[0], k[1], i, i_bus)][0, self.ind_mdw[(k[0], k[1], i, 'd')]] >= self.casedata['gencontrol'][i_gc, 4][0])
constraints.append( self.X_mdw[(k[0], k[1], i, i_bus)][0, self.ind_mdw[(k[0], k[1], i, 'd')]] <= self.casedata['gencontrol'][i_gc, 5][0])
# continuity constraints of differential variable profiles across time element boundaries within the subproblem, i.e., x[i, 0] = x[i-1, n_c], and also initial value constraints for the first time element.
for (k, i) in set_k_i_s:
if i == 1: # whether time element i is the first time element, if yes, add initial value constraints
for i_bus in self.i_gen:
constraints.append( self.X_mdw[(k[0], k[1], i, i_bus)][[0, self.ind_mdw[(k[0], k[1], i, 0, 'w')]],:][:,[0, self.ind_mdw[(k[0], k[1], i, 0, 'w')]]] ==
|
np.array([[1, self.w_0[i_bus-1]]])
|
numpy.array
|
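# The completion above builds the row vector [1, w_0]; the surrounding constraint
# fixes the initial-value block of X_mdw to its rank-1 outer product. A small numpy
# illustration of that pattern (w0 is an arbitrary sample value, not a model value):
import numpy as np

w0 = 0.02
row = np.array([[1, w0]])
X0 = row.T.dot(row)                      # [[1, w0], [w0, w0**2]]
assert np.allclose(X0, [[1.0, w0], [w0, w0 ** 2]])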
import numpy as np
from docanchors.search.strategies.seed import Seed
def test_seed_size(mocker):
for size in range(10):
candidate = np.zeros(20, dtype=bool)
s = Seed(size=size, num_seeds=1)
mocker.patch.object(s, "_random")
s._random.choice.return_value = np.array([0])
result = s(candidate)
assert np.sum(result) == size
assert np.sum(result[:size]) == size
def test_seed_position(mocker):
for position in range(20):
candidate = np.zeros(25, dtype=bool)
s = Seed(size=1, num_seeds=1)
mocker.patch.object(s, "_random")
s._random.choice.return_value =
|
np.array([position])
|
numpy.array
|
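# The tests above patch Seed._random so that choice() returns a fixed start index,
# then assert that the returned candidate contains a run of `size` True values from
# that index. A minimal numpy sketch of the behaviour the assertions describe (this
# mimics the tests only; it is not the docanchors implementation):
import numpy as np

def seeded_candidate(length, start, size):
    result = np.zeros(length, dtype=bool)
    result[start:start + size] = True
    return result

assert np.sum(seeded_candidate(20, 0, 5)) == 5
assert seeded_candidate(25, 7, 1)[7]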
#########################################################################
# (C) 2017 Department of Petroleum Engineering, #
# University of Louisiana at Lafayette, Lafayette, US. #
# #
# This code is released under the terms of the BSD license, and thus #
# free for commercial and research use. Feel free to use the code in #
# your own project with a PROPER REFERENCE. #
# #
# A near wellbore streamline tracking code #
# Author: <NAME> #
# Email: <EMAIL> #
# Reference: <NAME>., <NAME>., <NAME>., et al. (2017) An Embedded #
# Grid-Free Approach for Near Wellbore Streamline Simulation. #
# doi:10.2118/SPE-182614-MS #
#########################################################################
import numpy as np
import matplotlib.pyplot as plt
############################### Grid Subdivision Method #################################
def PointOnUnitSquare(NSL,Endpoint=False):
'''Generating points around a unit square
'''
Pts=np.zeros((NSL,2))
NSL_edge=int(NSL/4)
if Endpoint==True:
dx=np.linspace(0.0,1.0,NSL_edge,endpoint=True)
if Endpoint==False:
dx=np.linspace(0.0,1.0,NSL_edge,endpoint=True)
dx_rev=dx[::-1]
for i in range(4):
for j in range(NSL_edge):
if (i==0):
Pts[j+i*NSL_edge,0],Pts[j+i*NSL_edge,1]=dx[j],0
if (i==1):
Pts[j+i*NSL_edge,0],Pts[j+i*NSL_edge,1]=1,dx[j]
if (i==2):
Pts[j+i*NSL_edge,0],Pts[j+i*NSL_edge,1]=dx_rev[j],1
if (i==3):
Pts[j+i*NSL_edge,0],Pts[j+i*NSL_edge,1]=0,dx_rev[j]
return Pts
def PointOnUnitEdge(NSL,Endpoint=False):
'''Generating points around a unit edge
'''
Pts=np.zeros((NSL,2))
if Endpoint==True:
dx=np.linspace(0.0,1.0,NSL,endpoint=True)
if Endpoint==False:
dx=np.linspace(0.03,0.97,NSL,endpoint=True)
for i in range(NSL):
Pts[i,0],Pts[i,1]=0,dx[i]
return Pts
def CalcDist(Pts0=(0,0),Pts1=(1,1)):
'''Calculating distance of two points
'''
return np.sqrt((Pts1[0]-Pts0[0])**2+(Pts1[1]-Pts0[1])**2)
def RotateSL(SL,Single=0,origin=(0.0,0.0),angle=np.pi/2):
"""
Rotate a point counterclockwise by a given angle around a given origin.
The angle should be given in radians.
http://stackoverflow.com/questions/34372480/rotate-point-about-another-point-in-degrees-python
"""
#angle=self.RotateAngle
NSL=len(SL)
SL_new=SL.copy()
ox,oy=origin
if (Single==0):
for i in range(NSL):
for j in range(10): #1 SL have 10 nodes
px,py=SL[i][j]
qx = ox + np.cos(angle) * (px - ox) - np.sin(angle) * (py - oy)
qy = oy + np.sin(angle) * (px - ox) + np.cos(angle) * (py - oy)
SL_new[i][j]=qx,qy
elif (Single==1):
for i in range(10): #1 SL have 10 nodes
px,py=SL[i]
qx = ox + np.cos(angle) * (px - ox) - np.sin(angle) * (py - oy)
qy = oy + np.sin(angle) * (px - ox) + np.cos(angle) * (py - oy)
SL_new[i]=qx,qy
return SL_new
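# Aside (illustrative alternative, not part of the original file): the same rotation
# can be written with a 2x2 rotation matrix and applied to an (N, 2) array of points
# in one vectorized step instead of looping over the 10 nodes of each streamline.
def RotatePts(Pts, origin=(0.0, 0.0), angle=np.pi / 2):
    Pts = np.asarray(Pts, dtype=float)
    o = np.asarray(origin, dtype=float)
    R = np.array([[np.cos(angle), -np.sin(angle)],
                  [np.sin(angle), np.cos(angle)]])
    return (Pts - o).dot(R.T) + o       # rotate about origin, then shift back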
def TranslateSL(SL,Single=0,new_origin=(1.0,1.0),origin=(0.0,0.0)):
"""
Translate the SL by a given origin
Single=0 Treatment multiple streamline
Single=1 Treatment single streamline
"""
#new_origin=self.NewOrigin
NSL=len(SL)
SL_new=SL.copy()
ox_new,oy_new=new_origin
ox,oy=origin
if (Single==0):
for i in range(NSL):
for j in range(10): #1 SL have 10 nodes
px,py=SL[i][j]
qx = px + (ox_new - ox)
qy = py + (oy_new - oy)
SL_new[i][j]=qx,qy
elif(Single==1):
for i in range(10): #1 SL have 10 nodes
px,py=SL[i]
qx = px + (ox_new - ox)
qy = py + (oy_new - oy)
SL_new[i]=qx,qy
return SL_new
############################### Embedded Method #################################
def cosspace(st,ed,N,endpoint=True):
"""
Auto line segment refinement at end point
e.g. --- - - - - - - ---
"""
#N=N+1
AngleInc=np.pi/(N-1)
CurAngle = AngleInc
space=np.linspace(0,1,N,endpoint=endpoint)
space[0]=st
for i in range(N-1):
space[i+1] = 0.5*np.abs(ed-st)*(1 - np.cos(CurAngle));
CurAngle += AngleInc
if ed<st:
space[0]=ed
space=space[::-1]
return space
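# Quick illustration (not in the original file) of the end-point refinement cosspace
# produces compared with a uniform grid; the commented values follow from the
# function as defined above.
demo_refined = cosspace(0.0, 1.0, 5)      # approx [0, 0.146, 0.5, 0.854, 1]
demo_uniform = np.linspace(0.0, 1.0, 5)   # [0, 0.25, 0.5, 0.75, 1]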
def centroid2D(Pts):
Pts = np.asarray(Pts)
Npts=len(Pts)
return np.sum(Pts[:,0])/Npts,np.sum(Pts[:,1])/Npts
def EndPointOnLine(Pts_a=(0,0),Pts_b=(0,1),Nseg=4,refinement="linspace",Endpoint=True):
'''Generating endpoints along a line segment
algorithm: point with a given distance along a line: x=x_Start+unit_vector_x*distance y=y_Start+unit_vector_y*distance
Arguments
---------
Pts_a -- The start-point.
Pts_b -- The end-point
Npts -- Number of endpoints
Nseg -- Number of segments
unit_vx -- Unit vector for x coordinates
unit_vy -- Unit vector for y coordinates
interval -- Segment interval
uniform - - - - - - - - - - (linspace)
refinement -- - - - - - - -- (cosspace)
'''
Npts=Nseg+1
Pts=np.zeros((Npts,2))
length=CalcDist(Pts_a,Pts_b)
unit_vx=(Pts_b[0]-Pts_a[0])/length
unit_vy=(Pts_b[1]-Pts_a[1])/length
if (refinement=="linspace"):
interval=np.linspace(0.0,length,Npts,endpoint=Endpoint)
rinterval=np.linspace(length,0.0,Npts,endpoint=Endpoint)
elif (refinement=="cosspace"):
interval=cosspace(0.0,length,Npts,endpoint=Endpoint)
rinterval=cosspace(length,0.0,Npts,endpoint=Endpoint)
for i in range(Npts):
Pts[i,0]=Pts_a[0]+interval[i]*unit_vx
Pts[i,1]=Pts_a[1]+interval[i]*unit_vy
return Pts
def EndPointOnCircle(Origin=(0,0),R=1,Nseg=4):
'''Generating endpoints along a circle
Arguments
---------
Origin -- The center of the circle
R -- The radius of the circle
Npts -- Number of endpoints
Nseg -- Number of segments
'''
Npts=Nseg+1
Pts=np.zeros((Npts,2))
interval=np.linspace(0, 2*np.pi, Npts)
for i in range(Npts):
Pts[i,0]=Origin[0]+np.cos(interval[i])*R
Pts[i,1]=Origin[1]+np.sin(interval[i])*R
return Pts
def LineSegIntersect(Line1=([0,0],[1,1]),Line2=([0,1],[1,0])):
#Algorithm from http://bryceboe.com/2006/10/23/line-segment-intersection-algorithm/
#Test whether 2 line segment are intersected
xa,ya,xb,yb=Line1[0][0],Line1[0][1],Line1[1][0],Line1[1][1]
xc,yc,xd,yd=Line2[0][0],Line2[0][1],Line2[1][0],Line2[1][1]
ccw_ACD=(yd-ya)*(xc-xa) > (yc-ya)*(xd-xa)
ccw_BCD=(yd-yb)*(xc-xb) > (yc-yb)*(xd-xb)
ccw_ABC=(yc-ya)*(xb-xa) > (yb-ya)*(xc-xa)
ccw_ABD=(yd-ya)*(xb-xa) > (yb-ya)*(xd-xa)
return ccw_ACD != ccw_BCD and ccw_ABC != ccw_ABD
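# Small usage check (not in the original file): the default arguments are the two
# crossing diagonals of the unit square, while two parallel horizontal segments do
# not intersect.
assert LineSegIntersect(([0, 0], [1, 1]), ([0, 1], [1, 0]))
assert not LineSegIntersect(([0, 0], [1, 0]), ([0, 1], [1, 1]))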
############################### Fill-Grid Method #################################
def PolygonArea(Pts):
#Calculate 2D polygon area using Shoelace formula
#http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates/30408825
x,y=
|
np.asarray(Pts)
|
numpy.asarray
|
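# The PolygonArea prompt above is cut off mid-assignment; a self-contained sketch of
# the Shoelace formula it references (a generic implementation, not necessarily how
# the original file continues):
import numpy as np

def polygon_area(pts):
    pts = np.asarray(pts, dtype=float)
    x, y = pts[:, 0], pts[:, 1]
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

assert np.isclose(polygon_area([(0, 0), (1, 0), (1, 1), (0, 1)]), 1.0)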
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import random
# Enable font colors
class bcolors:
""" For the purpose of print in terminal with colors """
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def obs_to_state(obs, info):
"""
This function converts observation into state
Args:
obs: [x, y, v_x, v_y, cos(theta), sin(theta), theta_dot]
theta= robot orientation, alpha= angle between r->g and x-axis
info: {"goal_position", ...}
Returns:
state: [r_norm, p_norm, alpha, alpha_dot, beta, beta_dot]
r_norm: distance from map origin to robot
p_norm: distance from robot to goal
alpha: angle from map's x to r
beta: angle from robot's x to p
*_dot: angular velocity
"""
# compute states
r = obs[:2]
p = info["goal_position"] - obs[:2]
r_norm = np.linalg.norm(r) # sqrt(x^2+y^2)
p_norm = np.linalg.norm(p)
alpha = np.arctan2(obs[1], obs[0])
alpha_dot = np.arctan2(obs[3], obs[2])
# compute phi: angle from map's x_axis to p
x_axis = np.array([1, 0])
y_axis = np.array([0, 1])
cos_phi = np.dot(p, x_axis) / (np.linalg.norm(p)*np.linalg.norm(x_axis))
sin_phi = np.dot(p, y_axis) / (np.linalg.norm(p)*np.linalg.norm(y_axis))
phi = np.arctan2(sin_phi, cos_phi)
# compute beta in [-pi, pi]
beta = phi - np.arctan2(obs[-2], obs[-3])
if beta > np.pi:
beta -= 2*np.pi
elif beta < -np.pi:
beta += 2*np.pi
beta_dot = obs[-1]
state = np.array([r_norm, p_norm, alpha, alpha_dot, beta, beta_dot]).astype(np.float32)
return state
def discretize_state(state, boxes):
"""
Converts continuous state into discrete states
Args:
state:
boxes:
Returns:
index: state index in Q table, represent in tuple
"""
# match state into box
index = []
for i_s, st in enumerate(state):
for i_b, box in enumerate(boxes[i_s]):
if st >= box[0] and st <= box[1]:
index.append(i_b)
break
assert len(index) == 6
return tuple(index)
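# Minimal usage sketch for discretize_state (the boxes and state values are invented
# for illustration): each state dimension is matched against its list of
# [low, high] boxes and the tuple of box indices is returned.
_example_boxes = [[(-10, 0), (0, 10)]] * 6           # two boxes per dimension
_example_state = np.array([1.0, -2.0, 3.0, -4.0, 5.0, -6.0])
assert discretize_state(_example_state, _example_boxes) == (1, 0, 1, 0, 1, 0)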
def generate_action_sequence(num_sequences, len_horizon, num_actions):
""" Generate S random action sequences with H horizon
"""
action_sequences = np.zeros((num_sequences, len_horizon))
for s in range(num_sequences):
for h in range(len_horizon):
action_sequences[s,h] = random.randrange(num_actions)
return action_sequences
def sample_to_batch(samples_list, num_states, num_actions):
""" Create training batch from sampled memory
"""
x_batch = np.zeros((len(samples_list), num_states+num_actions))
y_batch = np.zeros((len(samples_list), num_states))
for i, s in enumerate(samples_list):
onehot_action = np.zeros(num_actions)
onehot_action[s[1]] = 1
x_batch[i] = np.concatenate((s[0], onehot_action))
y_batch[i] = s[-1]
return x_batch, y_batch
def create_dataset(input_features, output_labels, batch_size, shuffle=True, num_epochs=None):
""" Create TF dataset from numpy arrays
"""
dataset = tf.data.Dataset.from_tensor_slices((input_features, output_labels))
if shuffle:
dataset = dataset.shuffle(buffer_size = 1000)
dataset = dataset.batch(batch_size)
dataset = dataset.repeat(num_epochs)
return dataset
def shoot_action(model, action_sequences, state, goal):
""" Find an action with most reward using random shoot
"""
sequence_rewards = np.zeros(action_sequences.shape[0])
# Compute reward for every sequence
for seq in range(action_sequences.shape[0]):
old_state = np.array(state).reshape(1,-1).astype(np.float32)
# print("old_state: {}".format(old_state)) # debug
reward_in_horizon = 0
for hor in range(action_sequences.shape[1]):
action =
|
np.array([[action_sequences[seq,hor]]])
|
numpy.array
|
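# The shoot_action prompt is cut off right after the action index for one horizon
# step is selected. A hedged sketch of a typical next step in random-shooting MPC,
# following the x_batch layout used in sample_to_batch above (one-hot action
# concatenated to the state); `model` is a placeholder dynamics model, not the
# original code:
import numpy as np

def rollout_step(model, old_state, action_index, num_actions):
    onehot = np.zeros((1, num_actions), dtype=np.float32)
    onehot[0, int(action_index)] = 1.0
    model_input = np.concatenate((old_state, onehot), axis=1)   # (1, n_states + n_actions)
    next_state = model(model_input)                             # placeholder call
    return next_state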
"""
Defines the FOGIDiagram class and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
import scipy.linalg as _spl
import collections as _collections
from ..objects import Basis as _Basis
from ..objects.fogistore import FirstOrderGaugeInvariantStore as _FOGIStore
from ..tools import matrixtools as _mt
import matplotlib.cm as _matplotlibcm
from matplotlib.colors import LinearSegmentedColormap as _LinearSegmentedColormap
try:
import drawSvg as _draw
except ImportError:
_draw = None
#_Hcmap = _matplotlibcm.get_cmap('Reds_r')
#_Scmap = _matplotlibcm.get_cmap('Blues_r')
_cdict = {'red': [[0.0, 1.0, 1.0],
[1.0, 1.0, 1.0]],
'green': [[0.0, 0.0, 0.0],
[1.0, 1.0, 1.0]],
'blue': [[0.0, 0.0, 0.0],
[1.0, 1.0, 1.0]]}
_Hcmap = _LinearSegmentedColormap('lightReds', segmentdata=_cdict, N=256)
_cdict = {'red': [[0.0, 0.0, 0.0],
[1.0, 1.0, 1.0]],
'blue': [[0.0, 1.0, 1.0],
[1.0, 1.0, 1.0]],
'green': [[0.0, 0.0, 0.0],
[1.0, 1.0, 1.0]]}
_Scmap = _LinearSegmentedColormap('lightBlues', segmentdata=_cdict, N=256)
## - create table/heatmap of relational & gate-local strengths - also filter by ham/sto
# - create table of all quantities to show structure
def _create_errgen_op(vec, list_of_mxs):
return sum([c * mx for c, mx in zip(vec, list_of_mxs)])
def _is_dependent(infos_by_type):
for typ, infos_by_actedon in infos_by_type.items():
for acted_on, infos in infos_by_actedon.items():
for info in infos:
if info['dependent'] is False:
return False
return True
def _dstr(d, joinstr="<br>"): # dict-to-string formatting function
if len(d) == 1: return "%.3g" % _np.real_if_close(next(iter(d.values())))
return joinstr.join(["%s: %.3g" % (k, _np.real_if_close(v)) for k, v in d.items()])
def _fmt_tableval(val):
if isinstance(val, dict):
if len(val) == 1: return _fmt_tableval(next(iter(val.values())))
return " <br> ".join(["%s: %s" % (k, _fmt_tableval(v)) for k, v in val.items()])
if _np.iscomplex(val): val = _np.real_if_close(val)
if _np.isreal(val) or _np.iscomplex(val):
if abs(val) < 1e-6: val = 0.0
if _np.isreal(val): return "%.3g" % val.real
else: return "%.3g + %.3gj" % (val.real, val.imag)
return str(val)
def _make_table(table_info, rowlbl, title):
table_dict, table_rows, table_cols = table_info
html = "<table><thead><tr><th colspan=%d>%s</th></tr>\n" % (len(table_cols) + 1, title)
html += ("<tr><th>%s<th>" % rowlbl) + "</th><th>".join(table_cols) + "</th></tr></thead><tbody>\n"
for row in table_rows:
table_row_text = []
for col in table_cols:
val = table_dict[row][col]
table_row_text.append(_fmt_tableval(val))
html += "<tr><th>" + str(row) + "</th><td>" + "</td><td>".join(table_row_text) + "</td></tr>\n"
return html + "</tbody></table>"
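# Tiny usage sketch for _make_table (table contents invented for illustration):
# table_info is (dict-of-dicts, row labels, column labels) and the result is an
# HTML <table> string.
_demo_info = ({'G0': {'H': 0.01, 'S': 0.002}}, ['G0'], ['H', 'S'])
_demo_html = _make_table(_demo_info, rowlbl='op', title='FOGI demo')
assert _demo_html.startswith("<table>")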
class FOGIDiagram(object):
"""
A diagram of the first-order-gauge-invariant (FOGI) quantities of a model.
This class encapsulates a way of visualizing a model's FOGI quantities.
"""
def __init__(self, fogi_stores, op_coefficients, model_dim, op_to_target_qubits=None, impact_mode='current'):
# Note: fogi_store can one or multiple (a list of) stores
self.impact_mode = impact_mode
self.fogi_stores = [fogi_stores] if isinstance(fogi_stores, _FOGIStore) else fogi_stores
self.fogi_comps_by_store = [fogi_store.opcoeffs_to_fogi_components_array(op_coefficients)
for fogi_store in self.fogi_stores]
self.fogi_coeff_offsets = _np.cumsum([0] + [len(coeffs) for coeffs in self.fogi_comps_by_store])[0:-1]
self.fogi_comps = _np.concatenate(self.fogi_comps_by_store)
self.fogi_infos_by_store = [fogi_store.create_binned_fogi_infos()
for fogi_store in self.fogi_stores]
self.fogi_infos = _FOGIStore.merge_binned_fogi_infos(self.fogi_infos_by_store, self.fogi_coeff_offsets)
#Construct op_to_target_qubits if needed
if op_to_target_qubits is None:
all_qubits = set()
for fogi_store in self.fogi_stores:
for op_label in fogi_store.primitive_op_labels:
if op_label.sslbls is not None:
all_qubits.update(op_label.sslbls)
all_qubits = tuple(sorted(all_qubits))
op_to_target_qubits = {op_label: op_label.sslbls if (op_label.sslbls is not None) else all_qubits
for fogi_store in self.fogi_stores
for op_label in fogi_store.primitive_op_labels}
self.op_to_target_qubits = op_to_target_qubits
#We need the gauge basis to construct actual gauge generators for computation of J-angle below.
# these are un-normalized when we construct the gauge action in, e.g. first_order_ham_gauge_action_matrix(...)
# and so they must be un-normalized (standard Pauli mxs) here.
normalized_pauli_basis = _Basis.cast('pp', model_dim)
scale = model_dim**(0.25) # to change to standard pauli-product matrices
self.gauge_basis_mxs = [mx * scale for mx in normalized_pauli_basis.elements[1:]]
# op-sets (sets of operations) correspond to units/nodes on diagrams, so it's useful
# to have dictionaries of all the summarized information about all the fogi quantities
# living on a given set of operations.
self.op_set_info = {}
for op_set, op_fogi_infos_by_type in self.fogi_infos.items():
total = {}
flat_H_infos = [info for acted_on, infos in op_fogi_infos_by_type.get(('H',), {}).items() for info in infos]
flat_S_infos = [info for acted_on, infos in op_fogi_infos_by_type.get(('S',), {}).items() for info in infos]
if len(flat_H_infos) > 0: total['H'] = self._contrib(('H',), op_set, flat_H_infos)
if len(flat_S_infos) > 0: total['S'] = self._contrib(('S',), op_set, flat_S_infos)
total['mag'] = sum([self._extract_mag(contrib) for contrib in total.values()])
self.op_set_info[op_set] = {
'total': total,
'hs_support_table': self._make_coherent_stochastic_by_support_table(op_set, op_fogi_infos_by_type),
'individual_fogi_table': self._make_individual_fogi_table(op_set, op_fogi_infos_by_type),
'abbrev_individual_fogi_table': self._make_abbrev_table(op_set, op_fogi_infos_by_type),
'byweight': self._compute_by_weight_magnitudes(op_set, op_fogi_infos_by_type),
'bytarget': self._compute_by_target_magnitudes(op_set, op_fogi_infos_by_type),
'dependent': _is_dependent(op_fogi_infos_by_type),
}
assert(('H', 'S') not in op_fogi_infos_by_type)
def _contrib(self, typ, op_set, infos_to_aggregate):
def _sto_local_contrib(opt_set, infos):
error_rate = 0
for info in infos:
fogi_vec = info['fogi_dir'] / _np.linalg.norm(info['fogi_dir'])**2
vec_rate = sum(_np.abs(fogi_vec)) # L1 norm gives error rate of vector
#sto rates should all have normalized fogi_dirs so rate = 1 == L1 norm of *vec* = *dir* / L2(*dir*)^2
assert(_np.isclose(vec_rate, 1.0)) # if we've normalized correctly, this should be true
error_rate += self.fogi_comps[info['fogi_index']] * vec_rate
return {'error_rate': error_rate}
def _sto_relational_contrib(opt_set, infos):
error_rate = 0
for info in infos:
fogi_vec = info['fogi_dir'] / _np.linalg.norm(info['fogi_dir'])**2
vec_rate = sum(fogi_vec) # sum of elements gives error rate of vector
# HERE - intrinsic relational rate addition is still a bit mysterious -- we should allow for
# negative rates to balance positive ones to get an overall error rate, but maybe it's more
# complicated than just adding the elements up as we do above...
error_rate += self.fogi_comps[info['fogi_index']] * vec_rate
assert(abs(error_rate.imag) < 1e-6)
return {'error_rate': error_rate.real} # maybe negative rates are ok (?) -- we could take abs here.
def _ham_local_contrib(op_set, infos):
assert(len(op_set) == 1)
op_label = tuple(op_set)[0]
# Compute j-angle(error generated by sum_i(comp_i * e_i)) where
# i = info index and e_i = (normalized) fogi direction of i-th fogi qty.
if len(infos) == 0: return {op_label: 0.0, 'min_jangle': 0.0}
si = infos[0]['store_index'] # all infos must come from same *store*
op_indices_slc = self.fogi_stores[si].op_errorgen_indices[op_set[0]]
errgen_vec = _np.zeros((op_indices_slc.stop - op_indices_slc.start), complex)
for info in infos:
assert(set(op_set) == info['op_set'])
comp = self.fogi_comps[info['fogi_index']]
fogi_vec = info['fogi_dir'] / _np.linalg.norm(info['fogi_dir'])**2
# fogi vec = "inverse" of fogi dir because all local fogi dirs are orthonormal but not necessarily
# normalized - so dividing by the norm^2 here => dot(fogi_dir, fogi_vec) = 1.0 as desired.
# (we treat fogi_dir as if it were a member of an orthogonal basis when adding contributions)
errgen_vec += comp * fogi_vec[op_indices_slc]
errgen_op = _create_errgen_op(errgen_vec, self.gauge_basis_mxs) # NOTE: won't work for reduced models
j_angle = _mt.jamiolkowski_angle(errgen_op)
return {op_label: j_angle, 'min_jangle': j_angle}
def _ham_relational_contrib(op_set, infos):
if len(infos) == 0:
ret = {'min_jangle': 0.0}
ret.update({op_label: 0.0 for op_label in op_set})
return ret
# Compute j-angle(error generated by gauge-action-on-OP( sum_i(theta_i * gauge_vec_i)) )
# for each OP in op_set, where gauge_vec_i is the (normalized) gauge directions
# corresponding to the i-th fogi quantity (info).
si = infos[0]['store_index'] # all infos must come from same *store*
E = _np.column_stack([info['gauge_dir'] for info in infos])
theta =
|
_np.array([self.fogi_comps[info['fogi_index']] / info['r_factor'] for info in infos])
|
numpy.array
|
import numpy as np
import struct
import os
import re
from MiscLibs.common_functions import pol2cart, valid_number, nans
class Pd0TRDI(object):
"""Class to read data from PD0 files
Attributes
----------
file_name: str
Full name including path of pd0 file to be read
Hdr: Hdr
Object of Hdr for heading information
Inst: Inst
Object of Inst to hold instrument information
Cfg: Cfg
Object of Cfg to hold configuration information
Sensor: Sensor
Object of Sensor to hold sensor data
Wt: Wt
Object of Wt to hold water track data
Bt: Bt
Object of Bt to hold bottom track data
Gps: Gps
Object of Gps to hold GPS data from previous versions of WR
Gps2: Gps2
Object of Gps2 to hold GPS data from WR2
Surface: Surface
Object of Surface to hold surface cell data
AutoMode: AutoMode
Object of AutoMode to hold auto configuration settings
Nmea: Nmea
Object of Nmea to hold Nmea data
"""
def __init__(self, file_name):
"""Constructor initializing instance variables.
Parameters
----------
file_name: str
Full name including path of pd0 file to be read
"""
self.file_name = file_name
self.Hdr = None
self.Inst = None
self.Cfg = None
self.Sensor = None
self.Wt = None
self.Bt = None
self.Gps = None
self.Gps2 = None
self.Surface = None
self.AutoMode = None
self.Nmea = None
self.pd0_read(file_name)
def create_objects(self, n_ensembles, n_types, n_bins, max_surface_bins,
n_velocities, wr2=False):
"""Create objects for instance variables.
Parameters
----------
n_ensembles: int
Number of ensembles
n_types: int
Number of data types
n_bins: int
Number of bins or depth cells
max_surface_bins: int
Maximum number of surface cells
n_velocities: int
Number of velocities
wr2: bool
Whether WR2 processing of GPS data should be applied
"""
self.Hdr = Hdr(n_ensembles, n_types)
self.Inst = Inst(n_ensembles)
self.Cfg = Cfg(n_ensembles)
self.Sensor = Sensor(n_ensembles)
self.Wt = Wt(n_bins, n_ensembles, n_velocities)
self.Bt = Bt(n_ensembles, n_velocities)
self.Gps = Gps(n_ensembles)
self.Gps2 = Gps2(n_ensembles, wr2)
self.Surface = Surface(n_ensembles, n_velocities, max_surface_bins)
self.AutoMode = AutoMode(n_ensembles)
self.Nmea = Nmea(n_ensembles)
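# Aside (not part of the class): the leader-ID checks in pd0_read below read two
# bytes as a little-endian uint16 and compare the hex string against '0x7f7f'. A
# minimal sketch with an in-memory buffer shows why the PD0 header bytes
# 0x7F 0x7F compare equal:
_leader_id = hex(np.frombuffer(bytes([0x7F, 0x7F]), dtype=np.uint16)[0])
assert _leader_id == '0x7f7f'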
def pd0_read(self, fullname, wr2=False):
"""Reads the binary pd0 file and assigns values to object instance variables.
Parameters
----------
fullname: str
Full file name including path
wr2: bool
Determines if WR2 processing should be applied to GPS data
"""
# Assign default values
n_velocities = 4
max_surface_bins = 5
# Check to ensure file exists
if os.path.exists(fullname):
file_info = os.path.getsize(fullname)
if file_info > 0:
# Open file for processing
with open(fullname, 'rb') as f:
# Read leader ID
leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
# Leader ID 7f7f marks beginning of ensemble
if leader_id != '0x7f7f':
while leader_id != '0x7f7f':
f.seek(-1, 1)
leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
# Read header information
initial_pos = f.tell()-2
bytes_per_ens = np.fromfile(f, dtype=np.uint16, count=1)[0]
f.seek(1, 1)
n_types = np.fromfile(f, np.uint8, count=1)[0]
offset = np.fromfile(f, np.uint16, count=1)[0]
f.seek(initial_pos+offset+8, 0)
n_beams = np.fromfile(f, np.uint8, count=1)[0]
n_bins = np.fromfile(f, np.uint8, count=1)[0]
# Determine number of ensembles in the file to allow pre-allocation of arrays
n_ensembles = Pd0TRDI.number_of_ensembles(f, file_info)
# Create objects and pre-allocate arrays
self.create_objects(n_ensembles=n_ensembles,
n_types=n_types,
n_bins=n_bins,
max_surface_bins=max_surface_bins,
n_velocities=n_velocities)
# Initialize counters and variables
i_ens = -1
end_file_check = 0
end_file = file_info
i_data_types = 0
n_data_types = 1
file_loc = 0
i2022 = 0
j100, j101, j102, j103 = -1, -1, -1, -1
rr_bt_depth_correction = np.tile(np.nan, (n_beams, n_ensembles))
# Reset position in file
f.seek(initial_pos, 0)
# Begin reading file
while end_file_check < end_file:
# Read leader ID
leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
if i_data_types >= n_data_types and leader_id != '0x7f7f':
leader_id = '0x9999'
# 7f7f marks the beginning of an ensemble
if leader_id == '0x7f7f':
i2022 = 0
file_loc = f.tell() - 2
# Check for last ensemble in file
if file_loc+bytes_per_ens > end_file and i_ens >= n_ensembles:
end_file_check = end_file+1
else:
# Process ensemble
i_data_types = 0
store_file_loc = f.tell()
bytes_per_ens = np.fromfile(f, np.uint16, count=1)[0]
# Check check_sum
if self.check_sum(f, file_loc, bytes_per_ens):
f.seek(file_loc+5, 0)
n_data_types = np.fromfile(f, np.uint8, count=1)[0]
data_offsets = np.fromfile(f, np.uint16, count=n_data_types)
# Find variable leader ID
while i_data_types+1 <= n_data_types and leader_id != '0x80':
f.seek(data_offsets[i_data_types]+file_loc, 0)
leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
i_data_types += 1
# Check for consecutive ensemble numbers
if i_ens > -1 and leader_id == '0x80':
ens_num = np.fromfile(f, np.uint16, count=1)[0]
ens_num_diff = ens_num - self.Sensor.num[i_ens]
if ens_num_diff > 1:
for nn in range(0, int(ens_num_diff-1)):
if i_ens < n_ensembles:
self.Sensor.num[i_ens] = self.Sensor.num[i_ens-1]+1
i_ens += 1
elif ens_num_diff < 1:
i_ens -= 1
else:
self.bad_check_sum(f, file_loc)
# Initialize variables
f.seek(store_file_loc, 0)
i_data_types = 0
j100, j101, j102, j103 = -1, -1, -1, -1
i_ens += 1
# Read bytes in this ensemble
self.Hdr.bytes_per_ens[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
# If checksum is valid read header data
if self.check_sum(f, file_loc, int(self.Hdr.bytes_per_ens[i_ens])):
# Read number of data types
f.seek(file_loc+5, 0)
self.Hdr.n_data_types[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
# Read data offsets
test = np.fromfile(f, np.uint16, count=int(self.Hdr.n_data_types[i_ens]))
if test.shape[0] > self.Hdr.data_offsets.shape[1]:
self.Hdr.data_offsets.resize(n_ensembles, test.shape[0])
self.Hdr.data_offsets[i_ens, 0:int(self.Hdr.n_data_types[i_ens])] = \
test[0:int(self.Hdr.n_data_types[i_ens])]
# Check for end of data types
self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
else:
self.bad_check_sum(f, file_loc)
i_data_types = -1
# Read binary fixed leader data
elif leader_id == '0x0':
# Update data types counter
i_data_types += 1
# Read and decode firmware version
self.Inst.firm_ver[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Inst.firm_ver[i_ens] = self.Inst.firm_ver[i_ens] + \
np.fromfile(f, np.uint8, count=1)[0] / 100
# Read and decode instrument characteristics
bitls = np.fromfile(f, np.uint8, count=1)[0]
bitls = "{0:08b}".format(bitls)
val = int(bitls[5:], 2)
if val == 0:
self.Inst.freq[i_ens] = 75
elif val == 1:
self.Inst.freq[i_ens] = 150
elif val == 2:
self.Inst.freq[i_ens] = 300
elif val == 3:
self.Inst.freq[i_ens] = 600
elif val == 4:
self.Inst.freq[i_ens] = 1200
elif val == 5:
self.Inst.freq[i_ens] = 2400
else:
self.Inst.freq[i_ens] = np.nan
val = int(bitls[4], 2)
if val == 0:
self.Inst.pat[i_ens] = 'Concave'
elif val == 1:
self.Inst.pat[i_ens] = 'Convex'
else:
self.Inst.pat[i_ens] = 'n/a'
self.Inst.sensor_CFG[i_ens] = int(bitls[2:3], 2) + 1
val = int(bitls[1], 2)
if val == 0:
self.Inst.xducer[i_ens] = 'Not Attached'
elif val == 1:
self.Inst.xducer[i_ens] = 'Attached'
else:
self.Inst.xducer[i_ens] = 'n/a'
val = int(bitls[0], 2)
if val == 0:
self.Sensor.orient[i_ens] = 'Down'
elif val == 1:
self.Sensor.orient[i_ens] = 'Up'
else:
self.Sensor.orient[i_ens] = 'n/a'
bitms = np.fromfile(f, np.uint8, count=1)[0]
bitms = "{0:08b}".format(bitms)
val = int(bitms[6:], 2)
if val == 0:
self.Inst.beam_ang[i_ens] = 15
elif val == 1:
self.Inst.beam_ang[i_ens] = 20
elif val == 2:
self.Inst.beam_ang[i_ens] = 30
elif val == 3:
self.Inst.beam_ang[i_ens] = np.nan
else:
self.Inst.beam_ang[i_ens] = np.nan
val = int(bitms[:4], 2)
if val == 4:
self.Inst.beams[i_ens] = 4
elif val == 5:
self.Inst.beams[i_ens] = 5
self.Inst.demod[i_ens] = 1
elif val == 15:
self.Inst.beams[i_ens] = 5
self.Inst.demod[i_ens] = 2
else:
self.Inst.beams[i_ens] = np.nan
self.Inst.demod[i_ens] = np.nan
val = np.fromfile(f, np.uint8, count=1)[0]
if val == 0:
self.Inst.data_type[i_ens] = 'Real'
else:
self.Inst.data_type[i_ens] = 'Simu'
# Position file pointer and read configuration information
f.seek(1, 1)
self.Cfg.n_beams[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.wn[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.wp[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
self.Cfg.ws_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
self.Cfg.wf_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
self.Cfg.wm[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.wc[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.code_reps[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.wg_per[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.we_mmps[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
self.Cfg.tp_sec[i_ens] = np.sum(np.fromfile(f, np.uint8, count=3) * np.array([60, 1, 0.01]))
self.Cfg.ex[i_ens] = "{0:08b}".format(ord(f.read(1)))
val = int(self.Cfg.ex[i_ens][3:5], 2)
if val == 0:
self.Cfg.coord_sys[i_ens] = 'Beam'
elif val == 1:
self.Cfg.coord_sys[i_ens] = 'Inst'
elif val == 2:
self.Cfg.coord_sys[i_ens] = 'Ship'
elif val == 3:
self.Cfg.coord_sys[i_ens] = 'Earth'
else:
self.Cfg.coord_sys[i_ens] = "N/a"
val = int(self.Cfg.ex[i_ens][5], 2)
if val == 0:
self.Cfg.use_pr = 'No'
elif val == 1:
self.Cfg.use_pr = 'Yes'
else:
self.Cfg.use_pr = 'N/a'
val = int(self.Cfg.ex[i_ens][6], 2)
if val == 0:
self.Cfg.use_3beam = 'No'
elif val == 1:
self.Cfg.use_3beam = 'Yes'
else:
self.Cfg.use_3beam = 'N/a'
val = int(self.Cfg.ex[i_ens][7], 2)
if val == 0:
self.Cfg.map_bins = 'No'
elif val == 1:
self.Cfg.map_bins = 'Yes'
else:
self.Cfg.map_bins = 'N/a'
self.Cfg.ea_deg[i_ens] = np.fromfile(f, np.int16, count=1)[0] * 0.01
self.Cfg.ea_deg[i_ens] = np.fromfile(f, np.uint16, count=1)[0] * 0.01
self.Cfg.ez[i_ens] = "{0:08b}".format(np.fromfile(f, np.uint8, count=1)[0])
val = int(self.Cfg.ez[i_ens][:2], 2)
if val == 0:
self.Cfg.sos_src[i_ens] = 'Manual EC'
elif val == 1:
self.Cfg.sos_src[i_ens] = 'Calculated'
elif val == 3:
self.Cfg.sos_src[i_ens] = 'SVSS Sensor'
else:
self.Cfg.sos_src[i_ens] = 'N/a'
val = int(self.Cfg.ez[i_ens][2], 2)
if val == 0:
self.Cfg.xdcr_dep_srs[i_ens] = 'Manual ED'
elif val == 1:
self.Cfg.xdcr_dep_srs[i_ens] = 'Sensor'
else:
self.Cfg.xdcr_dep_srs[i_ens] = 'N/a'
val = int(self.Cfg.ez[i_ens][3], 2)
if val == 0:
self.Cfg.head_src[i_ens] = 'Manual EH'
elif val == 1:
self.Cfg.head_src[i_ens] = 'Int. Sensor'
else:
self.Cfg.head_src[i_ens] = 'N/a'
val = int(self.Cfg.ez[i_ens][4], 2)
if val == 0:
self.Cfg.pitch_src[i_ens] = 'Manual EP'
elif val == 1:
self.Cfg.pitch_src[i_ens] = 'Int. Sensor'
else:
self.Cfg.pitch_src[i_ens] = 'N/a'
val = int(self.Cfg.ez[i_ens][5], 2)
if val == 0:
self.Cfg.roll_src[i_ens] = 'Manual ER'
elif val == 1:
self.Cfg.roll_src[i_ens] = 'Int. Sensor'
else:
self.Cfg.roll_src[i_ens] = 'N/a'
val = int(self.Cfg.ez[i_ens][6], 2)
if val == 0:
self.Cfg.xdcr_dep_srs[i_ens] = 'Manual ES'
elif val == 1:
self.Cfg.xdcr_dep_srs[i_ens] = 'Int. Sensor'
else:
self.Cfg.xdcr_dep_srs[i_ens] = 'N/a'
val = int(self.Cfg.ez[i_ens][7], 2)
if val == 0:
self.Cfg.temp_src[i_ens] = 'Manual ET'
elif val == 1:
self.Cfg.temp_src[i_ens] = 'Int. Sensor'
else:
self.Cfg.temp_src[i_ens] = 'N/a'
self.Cfg.sensor_avail[i_ens] = "{0:08b}".format(np.fromfile(f, np.uint8, count=1)[0])
self.Cfg.dist_bin1_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
self.Cfg.xmit_pulse_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
self.Cfg.ref_lay_str_cell[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.ref_lay_end_cell[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.wa[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.cx[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.lag_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
self.Cfg.cpu_ser_no[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.wb[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.cq[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
# Check if more data types need to be read and position the pointer
self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
# Read variable leader data
elif leader_id == '0x80':
# Update the data types counter
i_data_types += 1
# Read instrument clock and sensor data
self.Sensor.num[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
self.Sensor.date_not_y2k[i_ens, :] = np.fromfile(f, np.uint8, count=3)
self.Sensor.time[i_ens, :] = np.fromfile(f, np.uint8, count=4)
self.Sensor.num_fact[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Sensor.num_tot[i_ens] = self.Sensor.num[i_ens] + self.Sensor.num_fact[i_ens]*65535
self.Sensor.bit_test[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
self.Sensor.sos_mps[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
self.Sensor.xdcr_depth_dm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
self.Sensor.heading_deg[i_ens] = np.fromfile(f, np.uint16, count=1)[0] / 100.
self.Sensor.pitch_deg[i_ens] = np.fromfile(f, np.int16, count=1)[0] / 100.
self.Sensor.roll_deg[i_ens] = np.fromfile(f, np.int16, count=1)[0] / 100.
self.Sensor.salinity_ppt[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
self.Sensor.temperature_deg_c[i_ens] = np.fromfile(f, np.int16, count=1)[0] / 100.
self.Sensor.mpt_msc[i_ens, :] = np.fromfile(f, np.uint8, count=3)
self.Sensor.heading_std_dev_deg[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Sensor.pitch_std_dev_deg[i_ens] = np.fromfile(f, np.uint8, count=1)[0] / 10.
self.Sensor.roll_std_dev_deg[i_ens] = np.fromfile(f, np.uint8, count=1) / 10.
self.Sensor.xmit_current[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Sensor.xmit_voltage[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Sensor.ambient_temp[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Sensor.pressure_pos[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Sensor.pressure_neg[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Sensor.attitude_temp[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Sensor.attitude[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Sensor.contam_sensor[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Sensor.error_status_word[i_ens] = ["{0:08b}".format(x)
for x in np.fromfile(f, np.uint8, count=4)]
f.seek(2, 1)
self.Sensor.pressure_pascal[i_ens] = np.fromfile(f, np.uint32, count=1)[0]
self.Sensor.pressure_var_pascal[i_ens] = np.fromfile(f, np.uint32, count=1)[0]
f.seek(1, 1)
self.Sensor.date_y2k[i_ens, :] = np.fromfile(f, np.uint8, count=4)
self.Sensor.time_y2k[i_ens, :] = np.fromfile(f, np.uint8, count=4)
self.Sensor.date[i_ens, :] = self.Sensor.date_not_y2k[i_ens, :]
self.Sensor.date[i_ens, 0] = self.Sensor.date_y2k[i_ens, 0] * 100 + \
self.Sensor.date_y2k[i_ens, 1]
self.Cfg.lag_near_bottom[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
# Check if more data types need to be read and position the pointer
self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
# Read water-tracking velocity data
elif leader_id == '0x100':
# Update the data types counter
i_data_types += 1
if self.Cfg.wn[i_ens] > self.Wt.vel_mps.shape[1]:
append = np.zeros([self.Wt.vel_mps.shape[0],
int(self.Cfg.wn[i_ens] - self.Wt.vel_mps.shape[1]),
self.Wt.vel_mps.shape[2]])
self.Wt.vel_mps = np.hstack([self.Wt.vel_mps, append])
dummy = np.fromfile(f, np.int16, count=int(self.Cfg.wn[i_ens]*4))
dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities])
self.Wt.vel_mps[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T
# Check if more data types need to be read and position the pointer
self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
# Read correlation magnitude
elif leader_id == '0x200':
# Update the data types counter
i_data_types += 1
if self.Cfg.wn[i_ens] > self.Wt.corr.shape[1]:
append = np.zeros([self.Wt.corr.shape[0],
int(self.Cfg.wn[i_ens] - self.Wt.corr.shape[1]),
self.Wt.corr.shape[2]])
self.Wt.corr = np.hstack([self.Wt.corr, append])
dummy = np.fromfile(f, np.uint8, count=int(self.Cfg.wn[i_ens]*4))
dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities])
self.Wt.corr[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T
# Check if more data types need to be read and position the pointer
self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
# Read echo intensity
elif leader_id == '0x300':
# Update the data types counter
i_data_types += 1
if self.Cfg.wn[i_ens] > self.Wt.rssi.shape[1]:
append = np.zeros([self.Wt.rssi.shape[0],
int(self.Cfg.wn[i_ens] - self.Wt.rssi.shape[1]),
self.Wt.rssi.shape[2]])
self.Wt.rssi = np.hstack([self.Wt.rssi, append])
dummy = np.fromfile(f, np.uint8, count=int(self.Cfg.wn[i_ens]*4))
dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities])
self.Wt.rssi[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T
# Check if more data types need to be read and position the pointer
self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
# Read percent-good data
elif leader_id == '0x400':
# Update the data types counter
i_data_types += 1
if self.Cfg.wn[i_ens] > self.Wt.pergd.shape[1]:
self.Cfg.wn[i_ens] = self.Wt.pergd.shape[1]
dummy = np.fromfile(f, np.uint8, count=int(self.Cfg.wn[i_ens]*4))
dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities])
self.Wt.pergd[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T
# Check if more data types need to be read and position the pointer
self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
# Read bottom track data
elif leader_id == '0x600':
# Update the data types counter
i_data_types += 1
# Read bottom track configuration data
self.Cfg.bp[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
long1 = np.fromfile(f, np.uint16, count=1)[0]
self.Cfg.bc[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.ba[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.bg[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.bm[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Cfg.be_mmps[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
# Read WinRiver 10.06 format GPS data
self.Gps.lat_deg[i_ens] = (np.fromfile(f, np.int32, count=1)[0]/2**31) * 180
# Read the Least Significant Bytes for beam depths
dummy = np.fromfile(f, np.uint16, count=4)
self.Bt.depth_m[0:4, i_ens] = dummy.T
# Read bottom-track velocities
dummy = np.fromfile(f, np.int16, count=4)
self.Bt.vel_mps[0:4, i_ens] = dummy.T
# Read bottom-track correlations
dummy = np.fromfile(f, np.uint8, count=4)
self.Bt.corr[0:4, i_ens] = dummy.T
# Read bottom-track evaluation amplitude
dummy = np.fromfile(f, np.uint8, count=4)
self.Bt.eval_amp[0:4, i_ens] = dummy.T
# Read bottom-track percent good
dummy = np.fromfile(f, np.uint8, count=4)
self.Bt.pergd[0:4, i_ens] = dummy.T
# Read WinRiver 10.06 format GPS data
dummy = np.fromfile(f, np.uint16, count=1)[0]
if dummy != -32768:
self.Gps.alt_m[i_ens] = (dummy-32768)/10
else:
self.Gps.alt_m[i_ens] = np.nan
long2 = np.fromfile(f, np.uint16, count=1)[0]
self.Gps.long_deg[i_ens] = ((long1+long2*2**16)/2**31)*180
if self.Gps.long_deg[i_ens] > 180:
self.Gps.long_deg[i_ens] -= 360
self.Bt.ext_depth_cm[i_ens] = np.fromfile(f, np.int16, count=1)[0]
dummy = np.fromfile(f, np.int16, count=1)[0]
if dummy != -32768:
self.Gps.gga_vel_e_mps[i_ens] = dummy * -1 / 1000
else:
self.Gps.gga_vel_e_mps[i_ens] = np.nan
dummy = np.fromfile(f, np.int16, count=1)[0]
if dummy != -32768:
self.Gps.gga_vel_n_mps[i_ens] = dummy * -1 / 1000
else:
self.Gps.gga_vel_n_mps[i_ens] = np.nan
dummy = np.fromfile(f, np.int16, count=1)[0]
if dummy != -32768:
self.Gps.vtg_vel_e_mps[i_ens] = dummy * -1 / 1000
else:
self.Gps.vtg_vel_e_mps[i_ens] = np.nan
dummy = np.fromfile(f, np.int16, count=1)[0]
if dummy != -32768:
self.Gps.vtg_vel_n_mps[i_ens] = dummy * -1 / 1000
else:
self.Gps.vtg_vel_n_mps[i_ens] = np.nan
dummy = np.fromfile(f, np.uint8, count=1)[0]
if dummy != 0:
self.Gps.gsa_v_dop[i_ens] = dummy
dummy = np.fromfile(f, np.uint8, count=1)[0]
if dummy != 0:
self.Gps.gsa_p_dop[i_ens] = dummy
dummy = np.fromfile(f, np.uint8, count=1)[0]
if dummy != 0:
self.Gps.gga_n_stats[i_ens, 0] = dummy
f.seek(1, 1)
self.Gps.gsa_sat[i_ens, 4] = np.fromfile(f, np.uint8, count=1)[0]
self.Gps.gsa_sat[i_ens, 5] = np.fromfile(f, np.uint8, count=1)[0]
self.Gps.gga_diff[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
dummy = np.fromfile(f, np.uint8, count=1)[0]
if dummy != 0:
self.Gps.gga_hdop[i_ens] = dummy / 10
self.Gps.gsa_sat[i_ens, 0] = np.fromfile(f, np.uint8, count=1)[0]
self.Gps.gsa_sat[i_ens, 1] = np.fromfile(f, np.uint8, count=1)[0]
self.Gps.gsa_sat[i_ens, 2] = np.fromfile(f, np.uint8, count=1)[0]
self.Gps.gsa_sat[i_ens, 3] = np.fromfile(f, np.uint8, count=1)[0]
# Read bx configuration setting
self.Cfg.bx_dm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
# Read bottom-tracking RSSI
self.Bt.rssi[0, i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Bt.rssi[1, i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Bt.rssi[2, i_ens] = np.fromfile(f, np.uint8, count=1)[0]
self.Bt.rssi[3, i_ens] = np.fromfile(f, np.uint8, count=1)[0]
# Read wj configuration setting
self.Cfg.wj[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
# Read most significant byte and compute beam depths
dummy = np.fromfile(f, np.uint8, count=1)[0]
rr_bt_depth_correction[0:4, i_ens] = dummy.T * 2**16 / 100
# Check if more data types need to be read and position the pointer
self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
# Read General NMEA Structure
# Data type '2022' contains sub data types that identify specific NMEA
# 0183 data types that will be decoded. There may be multiple values
# for a single ensemble.
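# The sub data types decoded below are: 100 (GGA position fix), 101 (VTG
# course/speed over ground), and 102 (DBT depth sounder).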
elif leader_id == '0x2022':
i2022 += 1
# Update the data types counter
i_data_types += 1
specific_id = np.fromfile(f, np.int16, count=1)[0]
msg_size = np.fromfile(f, np.int16, count=1)[0]
delta_time = np.fromfile(f, np.double, count=1)[0]
# GGA
if specific_id == 100:
j100 += 1
# If the number of values exceeds 20 expand arrays
if j100 > self.Gps2.gga_delta_time.shape[1] - 1:
self.Gps2.gga_expand(n_ensembles, i_ens)
self.Gps2.gga_delta_time[i_ens, j100] = delta_time
self.Gps2.gga_header[i_ens][j100] = ''.join([chr(x) for x in f.read(10)])
try:
temp = ''.join([chr(x) for x in f.read(10)])
self.Gps2.utc[i_ens, j100] = float(re.match(r'[0-9]+\.[0-9]',
temp).string.rstrip('\x00'))
except ValueError:
self.Gps2.utc[i_ens, j100] = np.nan
self.Gps2.lat_deg[i_ens, j100] = np.fromfile(f, np.float64, count=1)[0]
self.Gps2.lat_ref[i_ens][j100] = chr(f.read(1)[0])
self.Gps2.lon_deg[i_ens, j100] = np.fromfile(f, np.float64, count=1)[0]
self.Gps2.lon_ref[i_ens][j100] = chr(f.read(1)[0])
self.Gps2.corr_qual[i_ens, j100] = np.fromfile(f, np.uint8, count=1)[0]
self.Gps2.num_sats[i_ens, j100] = np.fromfile(f, np.uint8, count=1)[0]
self.Gps2.hdop[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
self.Gps2.alt[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
self.Gps2.alt_unit[i_ens][j100] = chr(f.read(1)[0])
self.Gps2.geoid[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
self.Gps2.geoid_unit[i_ens][j100] = chr(f.read(1)[0])
self.Gps2.d_gps_age[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
self.Gps2.ref_stat_id[i_ens, j100] = np.fromfile(f, np.int16, count=1)[0]
# VTG
elif specific_id == 101:
j101 += 1
# If the number of values exceeds 20 expand arrays
if j101 > self.Gps2.vtg_delta_time.shape[1] - 1:
self.Gps2.vtg_expand(n_ensembles, i_ens)
self.Gps2.vtg_delta_time[i_ens, j101] = delta_time
self.Gps2.vtg_header[i_ens][j101] = ''.join([chr(x) for x in f.read(10)])
self.Gps2.course_true[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
self.Gps2.true_indicator[i_ens][j101] = chr(f.read(1)[0])
self.Gps2.course_mag[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
self.Gps2.mag_indicator[i_ens][j101] = chr(f.read(1)[0])
self.Gps2.speed_knots[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
self.Gps2.kph_indicator[i_ens][j101] = chr(f.read(1)[0])
self.Gps2.mode_indicator[i_ens][j101] = chr(f.read(1)[0])
# Depth sounder
elif specific_id == 102:
j102 += 1
if j102 > self.Gps2.dbt_delta_time.shape[1] - 1:
self.Gps2.dbt_expand(n_ensembles, i_ens)
self.Gps2.dbt_delta_time[i_ens, j102] = delta_time
self.Gps2.dbt_header[i_ens][j102] = ''.join([chr(x) for x in f.read(10)])
self.Gps2.depth_ft[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0]
self.Gps2.ft_indicator[i_ens][j102] = chr(f.read(1)[0])
self.Gps2.depth_m[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0]
self.Gps2.m_indicator[i_ens][j102] = chr(f.read(1)[0])
self.Gps2.depth_fath[i_ens, j102] =
|
np.fromfile(f, np.float32, count=1)
|
numpy.fromfile
|
import os
import pandas as pd
import numpy as np
import xgboost as xgb
import logging, pickle
import joblib
from sklearn.svm import SVR, LinearSVR, NuSVR
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import ElasticNetCV
from Fuzzy_clustering.version3.ClusterCombineManager.GA_param_search import EvolutionaryAlgorithmSearchCV
class sklearn_model(object):
def __init__(self, static_data, cluster_dir, rated, model_type, njobs, is_combine=False, path_group=None):
self.static_data = static_data
self.path_group = path_group
self.rated=rated
self.model_dir = os.path.join(cluster_dir, str.upper(model_type))
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
self.model_type = model_type
self.is_combine = is_combine
self.optimizer = 'deap'
self.istrained = False
self.cluster = os.path.basename(cluster_dir)
logger = logging.getLogger('deap_train_' + '_' + self.model_type + self.cluster)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(self.model_dir, 'log_deap_train_' + self.cluster + '.log'), 'w')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
self.logger=logger
try:
self.load(self.model_dir)
except Exception:
pass
self.njobs=njobs
def compute_metrics(self, pred, y, rated):
if rated is None:
rated = y.ravel()
else:
rated = 1
err = np.abs(pred.ravel() - y.ravel()) / rated
sse = np.sum(np.square(pred.ravel() - y.ravel()))
rms = np.sqrt(np.mean(np.square(err)))
mae = np.mean(err)
mse = sse / y.shape[0]
return [sse, rms, mae, mse]
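# Example (hypothetical arrays): with pred = [1.0, 2.0], y = [1.0, 1.0] and
# rated = None, the per-sample error is normalised by y itself, giving mae = 0.5,
# sse = 1.0 and mse = 0.5.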
def fit_model1(self, model, params, cvs):
model.set_params(**params)
rms_val = []
rms_test = []
for cv in cvs:
model.fit(cv[0], cv[1].ravel())
ypred = model.predict(cv[2]).ravel()
if self.rated is None:
acc = np.mean(np.abs(ypred - cv[3].ravel()) / cv[3].ravel())
else:
acc = np.mean(np.abs(ypred - cv[3].ravel()))
rms_val.append(acc)
ypred = model.predict(cv[4]).ravel()
if self.rated is None:
acc = np.mean(np.abs(ypred - cv[5].ravel()) / cv[5].ravel())
else:
acc = np.mean(np.abs(ypred - cv[5].ravel()))
rms_test.append(acc)
return 0.4 * np.mean(rms_val) + 0.6 * np.mean(rms_test), np.mean(rms_test)
def train(self, cvs, init_params=[], FS=False, inner_jobs=1):
print('training with deap...')
X = np.vstack((cvs[0][0], cvs[0][2], cvs[0][4]))
if len(cvs[0][1].shape)==1 and len(cvs[0][5].shape)==1:
y = np.hstack((cvs[0][1], cvs[0][3], cvs[0][5]))
else:
y = np.vstack((cvs[0][1], cvs[0][3], cvs[0][5])).ravel()
self.D, self.N = X.shape
if 'elasticnet' in str.lower(self.model_type):
X_train = cvs[0][0]
y_train = cvs[0][1].reshape(-1, 1)
X_val = cvs[0][2]
y_val = cvs[0][3].reshape(-1, 1)
X_test = cvs[0][4]
y_test = cvs[0][5].reshape(-1, 1)
X_train = np.vstack((X_train, X_val, X_test))
y_train = np.vstack((y_train, y_val, y_test))
model = ElasticNetCV(cv=5, max_iter=4000)
model.fit(X_train, y_train.ravel())
self.best_params = model.get_params()
ypred = model.predict(X_test).ravel()
if self.rated is None:
self.accuracy = np.mean(np.abs(ypred - y_test.ravel()) / y_test.ravel())
else:
self.accuracy = np.mean(np.abs(ypred - y_test.ravel()))
self.acc_test = self.accuracy
self.model = model
self.logger.info('Best params')
self.logger.info(self.best_params)
self.logger.info('Final mae %s', str(self.acc_test))
self.logger.info('Final rms %s', str(self.accuracy))
self.logger.info('finish train for model %s', self.model_type)
self.istrained = True
self.save(self.model_dir)
return self.to_dict()
else:
if 'xgb' in str.lower(self.model_type):
params = {'learning_rate': np.logspace(-5, -1, num=6, base=10),
'max_depth': np.unique(np.linspace(1, 150, num=50).astype('int')),
'colsample_bytree': np.linspace(0.4, 1.0, num=60),
'colsample_bynode':
|
np.linspace(0.4, 1.0, num=60)
|
numpy.linspace
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 19 16:18:38 2021
@author: brsr
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mapproj import Areal, UnitVector, rodrigues, slerp, triangle_solid_angle
abc = np.zeros((3,3))
abc[0, :2] = [-0, -0.5]
abc[1, :2] = [ 0.5, -0.25]
abc[2, :2] = [-0.5, 0.5]
abc[:,2] = np.sqrt(1 - abc[:, 0]**2 - abc[:, 1]**2)
abc /= np.linalg.norm(abc, axis=1, keepdims=True)
detabc = np.linalg.det(abc)
midpoints = np.roll(abc, -1, axis=0) + np.roll(abc, 1, axis=0)
midpoints /= np.linalg.norm(midpoints, axis=1, keepdims=True)
edgecenters = np.cross(np.roll(abc, -1, axis=0), np.roll(abc, 1, axis=0))
angs = np.sum(np.roll(edgecenters, -1, axis=0)* np.roll(edgecenters, 1, axis=0), axis=1)
ncx = np.linalg.norm(edgecenters, axis=1)
edgecenters /= np.linalg.norm(edgecenters, axis=1, keepdims=True)
bisectors = np.roll(edgecenters, -1, axis=0) - np.roll(edgecenters, 1, axis=0)
bisectors /= np.linalg.norm(bisectors, axis=1, keepdims=True)
dots = np.sum(np.roll(abc, -1, axis=0)* np.roll(abc, 1, axis=0), axis=1)
t = np.linspace(0,1)[:,np.newaxis,np.newaxis]
#abcedges = slerp(np.roll(abc, -1, axis=0), np.roll(abc, 1, axis=0), t)
alltheta = np.linspace(0, 2*np.pi, 360)
#this is easier than figuring out how to make broadcasting cooperate
abcedges = np.zeros((len(t),3,3))
for i in range(3):
abcedges[:,i] = slerp(abc[i-1], abc[(i+1)%3], t).squeeze()
def plotinit():
fig = plt.figure()
ax = plt.axes()
patch = mpl.patches.Circle((0,0), radius=1, fill = False, edgecolor='k')
ax.add_artist(patch)
ax.set_xlim(-1,1)
ax.set_ylim(-1,1)
ax.set_aspect('equal')
ax.axis('off')
return fig, ax
ab = abc[0] @ abc[1]
bc = abc[1] @ abc[2]
ca = abc[2] @ abc[0]
axb = np.cross(abc[0], abc[1])
bxc = np.cross(abc[1], abc[2])
cxa = np.cross(abc[2], abc[0])
naxb =
|
np.linalg.norm(axb)
|
numpy.linalg.norm
|
import time
import gym
import gym.spaces
import numpy as np
import numpy.linalg as la
from beamngpy import BeamNGpy, Scenario, Vehicle, setup_logging
from beamngpy.sensors import Electrics, Damage
from shapely import affinity
from shapely.geometry import LinearRing, LineString, Polygon, Point
def normalise_angle(angle):
if angle < 0:
angle += np.pi * 2
return angle
def calculate_curvature(points, idx):
p1 = points[idx - 1]
p2 = points[idx + 0]
p3 = points[idx + 1]
curvature = 2 * (
(p2[0] - p1[0]) * (p3[1] - p2[1]) -
(p2[1] - p1[1]) * (p3[0] - p2[0])) / (np.sqrt(
(np.square(p2[0] - p1[0]) + np.square(p2[1] - p1[1])) *
(np.square(p3[0] - p2[0]) + np.square(p3[1] - p2[1])) *
(np.square(p1[0] - p3[0]) + np.square(p1[1] - p3[1]))) + 0.00000001
)
return curvature
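# Worked check (hypothetical points): for p1 = (1, 0), p2 = (0, 1), p3 = (-1, 0),
# which lie on the unit circle, the formula returns approximately 1.0 (the
# curvature of a radius-1 circle); the sign encodes the turn direction.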
def calculate_inclination(points, idx):
p1 = points[idx - 1]
p3 = points[idx + 1]
inclination = p3[2] - p1[2]
return inclination
class WCARaceGeometry(gym.Env):
sps = 50
rate = 5
front_dist = 800
front_step = 100
trail_dist = 104
trail_step = 13
starting_proj = 1710
max_damage = 100
def __init__(self, host='localhost', port=64256):
self.steps = WCARaceGeometry.sps // WCARaceGeometry.rate
self.host = host
self.port = port
self.action_space = self._action_space()
self.observation_space = self._observation_space()
self.episode_steps = 0
self.spine = None
self.l_edge = None
self.r_edge = None
self.polygon = None
front_factor = WCARaceGeometry.front_dist / WCARaceGeometry.front_step
trail_factor = WCARaceGeometry.trail_dist / WCARaceGeometry.trail_step
self.front = lambda step: +front_factor * step
self.trail = lambda step: -trail_factor * step
self.bng = BeamNGpy(self.host, self.port)
self.vehicle = Vehicle('racecar', model='sunburst', licence='BEAMNG',
colour='red',
partConfig='vehicles/sunburst/hillclimb.pc')
electrics = Electrics()
damage = Damage()
self.vehicle.attach_sensor('electrics', electrics)
self.vehicle.attach_sensor('damage', damage)
scenario = Scenario('west_coast_usa', 'wca_race_geometry_v0')
scenario.add_vehicle(self.vehicle, pos=(394.5, -247.925, 145.25),
rot=(0, 0, 90))
scenario.make(self.bng)
self.bng.open(launch=True)
self.bng.set_deterministic()
self.bng.set_steps_per_second(WCARaceGeometry.sps)
self.bng.load_scenario(scenario)
self._build_racetrack()
self.observation = None
self.last_observation = None
self.last_spine_proj = None
self.bng.start_scenario()
self.bng.pause()
def __del__(self):
self.bng.close()
def _build_racetrack(self):
roads = self.bng.get_roads()
track = roads['race_ref']
l_vtx = []
s_vtx = []
r_vtx = []
for right, middle, left in track:
r_vtx.append(right)
s_vtx.append(middle)
l_vtx.append(left)
self.spine = LinearRing(s_vtx)
self.r_edge = LinearRing(r_vtx)
self.l_edge = LinearRing(l_vtx)
r_vtx = [v[0:2] for v in r_vtx]
l_vtx = [v[0:2] for v in l_vtx]
self.polygon = Polygon(l_vtx, holes=[r_vtx])
def _action_space(self):
action_lo = [-1., -1.]
action_hi = [+1., +1.]
return gym.spaces.Box(
|
np.array(action_lo)
|
numpy.array
|
import numpy as np
def test_n_components_from_reducer():
from pymks import MKSStructureAnalysis
from pymks import DiscreteIndicatorBasis
from sklearn.manifold import LocallyLinearEmbedding
reducer = LocallyLinearEmbedding(n_components=7)
dbasis = DiscreteIndicatorBasis(n_states=3, domain=[0, 2])
model = MKSStructureAnalysis(dimension_reducer=reducer, basis=dbasis)
assert model.n_components == 7
def test_n_components_with_reducer():
from pymks import MKSStructureAnalysis
from pymks import DiscreteIndicatorBasis
from sklearn.manifold import Isomap
reducer = Isomap(n_components=7)
dbasis = DiscreteIndicatorBasis(n_states=3, domain=[0, 2])
model = MKSStructureAnalysis(dimension_reducer=reducer, basis=dbasis,
n_components=9)
assert model.n_components == 9
def test_n_components_change():
from pymks import MKSStructureAnalysis
from pymks import DiscreteIndicatorBasis
dbasis = DiscreteIndicatorBasis(n_states=2)
model = MKSStructureAnalysis(basis=dbasis)
model.n_components = 27
assert model.n_components == 27
def test_default_n_components():
from pymks import MKSStructureAnalysis
from pymks import DiscreteIndicatorBasis
dbasis = DiscreteIndicatorBasis(n_states=2)
model = MKSStructureAnalysis(basis=dbasis)
assert model.n_components == 5
def test_default_dimension_reducer():
from sklearn.decomposition import PCA
from pymks import MKSStructureAnalysis
from pymks import PrimitiveBasis
model = MKSStructureAnalysis(basis=PrimitiveBasis())
assert isinstance(model.dimension_reducer, PCA)
def test_default_correlations():
from pymks import PrimitiveBasis
from pymks import MKSStructureAnalysis
prim_basis = PrimitiveBasis(6)
model_prim = MKSStructureAnalysis(basis=prim_basis)
assert model_prim.correlations == [(0, 0), (0, 1), (0, 2),
(0, 3), (0, 4), (0, 5)]
def test_set_correlations():
from pymks import PrimitiveBasis
from pymks import MKSStructureAnalysis
test_correlations = [(0, 0), (0, 2), (0, 4)]
prim_basis = PrimitiveBasis(6)
model_prim = MKSStructureAnalysis(basis=prim_basis,
correlations=test_correlations)
assert model_prim.correlations == test_correlations
def test_reshape_X():
from pymks import MKSStructureAnalysis
from pymks import PrimitiveBasis
analyzer = MKSStructureAnalysis(basis=PrimitiveBasis())
X = np.arange(18, dtype='float64').reshape(2, 3, 3)
X_test = np.concatenate((np.arange(-4, 5)[None], np.arange(-4, 5)[None]))
assert np.allclose(analyzer._reduce_shape(X), X_test)
def test_set_components():
from pymks import MKSStructureAnalysis
from pymks import PrimitiveBasis
p_basis = PrimitiveBasis(2)
model = MKSStructureAnalysis(basis=p_basis)
X = np.random.randint(2, size=(50, 10, 10))
model.fit(X)
components = model.components_
model.components_ = components * 2
assert np.allclose(model.components_, components * 2)
def test_store_correlations():
from pymks import MKSStructureAnalysis
from pymks import PrimitiveBasis
from pymks.stats import correlate
p_basis = PrimitiveBasis(2)
model = MKSStructureAnalysis(basis=p_basis, store_correlations=True)
X = np.random.randint(2, size=(2, 4, 4))
model.fit(X)
X = correlate(X, p_basis, correlations=[(0, 0), (0, 1)])
assert np.allclose(X, model.fit_correlations)
X_0 = np.random.randint(2, size=(2, 4, 4))
model.transform(X_0)
X_corr_0 = correlate(X_0, p_basis, correlations=[(0, 0), (0, 1)])
assert
|
np.allclose(X_corr_0, model.transform_correlations)
|
numpy.allclose
|
'''
Runs unit tests for dimensionality reduction algorithms.
To run the unit tests, type the following from the system command line:
# python -m spectral.tests.dimensionality
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import spectral as spy
from spectral.tests.spytest import SpyTest, test_method
class DimensionalityTest(SpyTest):
'''Tests various math functions.'''
def setup(self):
self.data = spy.open_image('92AV3C.lan').load()
def test_mnf_all_equals_data(self):
'''Test that MNF transform with all components equals original data.'''
data = self.data
signal = spy.calc_stats(data)
noise = spy.noise_from_diffs(data[117: 137, 85: 122, :])
mnfr = spy.mnf(signal, noise)
denoised = mnfr.denoise(data, num=data.shape[-1])
assert(np.allclose(denoised, data))
def test_ppi(self):
'''Tests that ppi function runs'''
data = self.data
p = spy.ppi(data, 4)
def test_ppi_threshold(self):
'''Tests that ppi function runs with threshold arg'''
data = self.data
p = spy.ppi(data, 4, 10)
def test_ppi_continues(self):
'''Tests that running ppi with initial indices works as expected.'''
data = self.data
s = np.random.get_state()
p = spy.ppi(data, 4)
np.random.set_state(s)
p2 = spy.ppi(data, 2)
p2 = spy.ppi(data, 2, start=p2)
assert(np.all(p == p2))
def test_ppi_centered(self):
'''Tests that ppi with mean-subtracted data works as expected.'''
data = self.data
s = np.random.get_state()
p = spy.ppi(data, 4)
np.random.set_state(s)
data_centered = data - spy.calc_stats(data).mean
p2 = spy.ppi(data_centered, 4)
assert(np.all(p == p2))
def test_smacc_minimal(self):
'''Tests smacc correctness on minimal example.'''
H = np.array([
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]
])
S, F, R = spy.smacc(H)
assert(np.allclose(np.matmul(F, S) + R, H))
assert(np.min(F) == 0.0)
expected_S = np.array([
# First two longer ones.
[1., 1., 0.],
[0., 1., 1.],
# First of the two shorter ones. The other can be expressed in terms of the other 3.
[1., 0., 0.],
])
assert(np.array_equal(S, expected_S))
def test_smacc_runs(self):
'''Tests that smacc runs without additional arguments.'''
# Without scaling numeric errors accumulate.
scaled_data = self.data / 10000
S, F, R = spy.smacc(scaled_data)
data_shape = scaled_data.shape
H = scaled_data.reshape(data_shape[0] * data_shape[1], data_shape[2])
assert(np.allclose(np.matmul(F, S) + R, H))
assert(np.min(F) == 0.0)
assert(len(S.shape) == 2 and S.shape[0] == 9 and S.shape[1] == 220)
def test_smacc_min_endmembers(self):
'''Tests that smacc runs with min_endmember argument.'''
# Without scaling numeric errors accumulate.
scaled_data = self.data / 10000
S, F, R = spy.smacc(scaled_data, 10)
data_shape = scaled_data.shape
H = scaled_data.reshape(data_shape[0] * data_shape[1], data_shape[2])
assert(np.allclose(np.matmul(F, S) + R, H))
assert(np.min(F) == 0.0)
assert(len(S.shape) == 2 and S.shape[0] == 10 and S.shape[1] == 220)
def test_smacc_max_residual_norm(self):
'''Tests that smacc runs with max_residual_norm argument.'''
# Without scaling numeric errors accumulate.
scaled_data = self.data / 10000
S, F, R = spy.smacc(scaled_data, 9, 0.8)
data_shape = scaled_data.shape
H = scaled_data.reshape(data_shape[0] * data_shape[1], data_shape[2])
assert(np.allclose(np.matmul(F, S) + R, H))
assert(np.min(F) == 0.0)
residual_norms = np.einsum('ij,ij->i', R, R)
assert(
|
np.max(residual_norms)
|
numpy.max
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Generic packages
import numpy as np
import hjson, json, os
from pathlib import Path
# For integration times
import astropy.units as u
# For keep outs and to convert decimal dates into readable dates
import EXOSIMS, EXOSIMS.MissionSim
from astropy.time import Time
from scripts.cgi_etc_star_accessibility import cgi_etc_star_accessibility
# (Optional) Plotting the results
import matplotlib.pyplot as plt
# IMD
import pandas as pd
# Updated specs for EXOSIMS
from scripts.cgi_etc_update_specs import cgi_etc_update_specs
# Linear interpolation
from scipy import interpolate
# CSV file
from scripts.store_csv_file import store_csv_file_rv
def cgi_etc_rv_shortest_integration_time(CGI_epoch0, CGI_epoch1, filterList, jsonFile, csvFileName, CGI_Observations):
# Path with the orbital data of the planets from IMD (https://plandb.sioslab.com/index.php)
pathIMD = './imd/'
# Meaning of table parameters from IMD (# https://plandb.sioslab.com/docs/html/index.html#planetorbits-table)
# t = time in days since 01/01/2026
# r = actual distance planet-host star in AU
# s = visual separation
# beta = orbital phase angle
# Remember (See above) that IMD data start at 01/01/2026 (not sure whether it is based on mJD, ISO, or else)
imdEpoch0 = 2026.00
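# Worked example (illustrative value): a CGI epoch of 2027.5 maps to roughly
# 365.25 * (2027.5 - 2026.0) ~ 548 days after 01/01/2026, which is the day index
# used against the IMD ephemeris tables below.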
# P.S. PName is used later on to read csv files from IMD
# P.P.S. Leave a blank space between the name of the star and the planet, e.g., 14 Her b
PName = CGI_Observations['PName']
nPlanets = len(PName)
# HIP identifiers
hipPName = CGI_Observations['hipPName']
# Derive the star's accessibility
accessibleDays = cgi_etc_star_accessibility(CGI_epoch0, CGI_epoch1,
jsonFile, csvFileName, PName, hipPName)
# Write star names
starName = [''] * nPlanets
starNameCommon = [''] * nPlanets
for i_p in np.arange(nPlanets):
starName[i_p] = hipPName[i_p][0:len(hipPName[i_p])-2]
starNameCommon[i_p] = PName[i_p][0:len(PName[i_p])-2]
# Values of the cloud fsed used by IMD
cloudFsed = [0.00, 0.01, 0.03, 0.10, 0.30, 1.00, 3.00, 6.00]
nFsed = len(cloudFsed)
# Table of weights used to average the DMag from IMD (table provided by <NAME> to <NAME>)
freqFsed = [0.099, 0.001, 0.005, 0.010, 0.025, 0.280, 0.300, 0.280]
# Make sure it is normalized (it already is; this guards against future changes)
freqFsed = freqFsed / np.sum(freqFsed)
# Filters: Band 1, 3 and 4
# P.S. Technical note for developers: Use 'NF', 'Amici_Spec' and 'WF', respectively, because
# these substrings are used when assigning the actual value of
# the post-processing value for each mode. Also, EXOSIMS makes use of 'Amici' and 'Spec' in Nemati_2019.py
nFilters = len(filterList)
# Keeping track of the actual value of the post-processing factor used in the estimations.
# Notice that EXOSIMS's ppFact equals (1/kpp), where kpp is the post-processing factor in CGI Perf,
# which is the usual way to report it. For instance, for the NF, kpp=2, and then EXOSIMS ppFact is 0.5.
kppList = np.empty(nFilters)
kppList.fill(np.nan)
####################################################
# Deriving expected integration times given an SNR #
####################################################
# SNR list
SNRRefList = CGI_Observations['SNRList']
nSNRRef = len(SNRRefList)
# SNR list to derive the integration times (fast, no worries)
# Grid of values of SNR, instead of results for the values in SNRRefList only.
# P.S. Small SNR values are used to highlight cases that are not worth
# observing
SNRList = np.sort(np.concatenate([SNRRefList, np.arange(0.5,20,0.5),
np.arange(20,105,5)], axis=0))
nSNR = len(SNRList)
# Keeping track of the SNR actually found (in general, it should be the same as
# in SNRRefList but they are the values in SNRList closest to SNRRefList)
SNRRefFound = np.empty(len(SNRRefList))
SNRRefFound.fill(np.nan)
## First and last indices for the epochs under consideration
dayEpoch0 = np.round(365.25 * (CGI_epoch0 - imdEpoch0)).astype(int)
dayEpoch1 = np.round(365.25 * (CGI_epoch1 - imdEpoch0)).astype(int)
# Imaging Mission Database says that the orbits are computed every 30 days, but there are cases where this is not the case (02/10/21: https://plandb.sioslab.com/plandetail.php?name=47+UMa+d whose CSV table has steps of 141 days)
# I just assume it is 1 day, although in general it is larger. No problem. The rest of unused indices are filled with NaN
dayEpochArray = np.empty((nPlanets, dayEpoch1 - dayEpoch0 + 1))
dayEpochArray.fill(np.nan)
waArcsecArray = np.empty((nPlanets, dayEpoch1 - dayEpoch0 + 1))
waArcsecArray.fill(np.nan)
fRatioArray = np.empty((nPlanets, nFilters, dayEpoch1 - dayEpoch0 + 1))
fRatioArray.fill(np.nan)
intTimeFilterHours = np.empty((nPlanets, nFilters, nSNR, dayEpoch1 - dayEpoch0 + 1))
intTimeFilterHours.fill(np.nan)
sInds = np.empty(nPlanets, dtype=int)
sInds.fill(np.nan)
# Looping over filters
for i_flt in np.arange(nFilters):
# Updating the instrumental specs because of the different post-processing factor for each filter.
kppTmp, OSTmp, TLTmp, TKTmp = \
cgi_etc_update_specs(jsonFile, filterList[i_flt],
CGI_epoch0, CGI_epoch1)
kppList[i_flt] = kppTmp
mode = list(filter(lambda mode: mode['instName'] == filterList[i_flt], OSTmp.observingModes))[0]
# Local zodi
fZ = TLTmp.ZodiacalLight.fZ0
# Loop over planets
for i_pl in np.arange(nPlanets):
# Index where the host star is found in the target list
sInds[i_pl] = np.where(TLTmp.Name == starName[i_pl])[0]
# Reading the CSV file from IMD
PStr = PName[i_pl]
# P.S. From IMD: if no inclination available, orbit is assumed edge-on. If no eccentricity is available, orbit is assumed circular.
planetDataOrig = pd.read_csv(pathIMD + PStr.replace(' ', '_' ) + '_orbit_data.csv')
# IMD documentation (point 11 in https://plandb.sioslab.com/docs/html/index.html#planetorbits-table)
# say (sic) "NaN when period of time of periastron passage are undefined"
# If this is the case skip the planet
if np.isnan(planetDataOrig['t']).all() == True:
print('WARNING: Planet ' + PName[i_pl] + ' has undefined Ephemeris. Skipping it ...')
continue
# Creating a new pandas dataframe for each day using linear interpolation
dict_tmp = {}
dict_tmp['t'] = dayEpoch0 + np.arange(dayEpoch1-dayEpoch0+1)
for column in planetDataOrig.columns:
if column == 't': continue
if isinstance(planetDataOrig[column][0], float):
interpolant = interpolate.interp1d(planetDataOrig['t'],
planetDataOrig[column], kind='linear')
# IMD ephemeris may have more than 1 orbit
try:
orbital_period = np.where(planetDataOrig['t']==0)[0]
orbital_period_days = \
planetDataOrig['t'][orbital_period[1]-1] - \
planetDataOrig['t'][orbital_period[0]]
dict_tmp[column] = \
interpolant(dict_tmp['t'] % orbital_period_days)
except:
dict_tmp[column] = interpolant(dict_tmp['t'])
# database
planetDataCgi = pd.DataFrame.from_dict(dict_tmp)
dayEpochArray[i_pl,0:len(planetDataCgi)] = planetDataCgi['t']
# Angular visual separation of the planet
waArcsec = planetDataCgi['WA'].values / 1000 * u.arcsec
waArcsecArray[i_pl,0:len(waArcsec)]=waArcsec.value
# Actual planet-star distance (only used for exozodi)
r_au = planetDataCgi['r'].values * u.AU
# Fiducial visual inclination (only used for exozodi). The CSV files from IMD do not provide it.
inc_deg = [20] * u.deg
# Exozodi along the orbit
fEZ = TLTmp.ZodiacalLight.fEZ(np.array([TLTmp.MV[sInds[i_pl]]]), inc_deg, r_au)
fRatio = np.zeros(len(planetDataCgi['t']))
# Looping over cloud fsed to get the average flux ratio
for i_fsed in np.arange(nFsed):
# Using the center wavelength of each observing mode to select the corresponding data
# These values are stored in new columns pPhi_XXXC_YYYNM and dMag_XXXC_YYYNM
# where XXX is the cloud fsed scaled by 100 (000 representing no cloud) and
# YYY is the wavelength in nm.
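# For example, cloud fsed = 0.30 observed with a mode centred at 575 nm (an
# illustrative wavelength) would map to the column 'dMag_030C_575NM'.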
keyPlanetDataCgi = 'dMag_' + str(format(np.round(cloudFsed[i_fsed] * 100).astype(int),'03d')) + 'C_' + str(mode['lam'].to_value().astype(int)) + 'NM'
fRatio = fRatio + freqFsed[i_fsed] * np.power(10,-0.4 * planetDataCgi[keyPlanetDataCgi])
fRatioArray[i_pl, i_flt,0:len(fRatio)]= np.array(fRatio)
dMags = -2.5 * np.log10(np.array(fRatio))
# Only consider days that are accessible
try:
dMags[accessibleDays==False]=np.nan
# Pass in case accessibility has not been computed
except:
pass
# Looping over SNR
for i_snr in np.arange(nSNR):
mode['SNR'] = SNRList[i_snr]
intTimeTmp = OSTmp.calc_intTime(TLTmp, np.array([sInds[i_pl]]), fZ, fEZ, dMags, waArcsec, mode, TK=TKTmp).to('hour').value
intTimeTmp[np.where(intTimeTmp == 0)] = np.nan
intTimeFilterHours[i_pl, i_flt, i_snr, 0:len(fRatio)] = intTimeTmp
# Restoring the 'true_divide' error after EXOSIMS run
np.seterr(divide='warn', invalid='warn')
# Getting the maximum time that the target is accessible and its SNR
SNRPlanetMax = np.empty((nPlanets, nFilters))
SNRPlanetMax.fill(np.min(SNRList))
intTimeSNRMax = np.empty((nPlanets, nFilters))
intTimeSNRMax.fill(np.nan)
intTmpHours = np.empty((nSNR))
intTmpHours.fill(np.nan)
for i_pl in np.arange(nPlanets):
# Days that the target is accessible
if nPlanets == 1:
nDaysPlanet = np.sum(accessibleDays)
else:
nDaysPlanet = np.sum(accessibleDays[i_pl])
for i_flt in np.arange(nFilters):
for i_snr in np.arange(nSNR):
# Shortest integration time within accessible times
intTmpHours[i_snr] = \
np.nanmin(intTimeFilterHours[i_pl, i_flt, i_snr, :])
# First time that it is not possible to achieve an SNR,
# it means that the previous step was the largest value
if np.isnan(intTmpHours[i_snr]) == False:
# If the integration time fits within the accessibility window
if intTmpHours[i_snr] <= (nDaysPlanet*24):
SNRPlanetMax[i_pl, i_flt] = SNRList[i_snr]
intTimeSNRMax[i_pl, i_flt] = intTmpHours[i_snr]
else:
SNRInterpolant = interpolate.interp1d(
intTmpHours[0:i_snr+1], SNRList[0:i_snr+1],
kind='linear')
# Round to 1 decimal place (it's SNR)
SNRPlanetMax[i_pl, i_flt] = \
np.round(SNRInterpolant(nDaysPlanet*24), decimals=1)
intTimeSNRMax[i_pl, i_flt] = nDaysPlanet*24
# Replace bad cases by NaN now
for i_pl in np.arange(nPlanets):
for i_flt in np.arange(nFilters):
if SNRPlanetMax[i_pl, i_flt] == np.min(SNRList):
SNRPlanetMax[i_pl, i_flt] = np.nan
intTimeSNRMax[i_pl, i_flt]= np.nan
# Summarize results
nSNRRef = len(SNRRefList)
# The Epoch of observation, WA, and flux ratio do not change with SNR
dayEpochBestTime = np.empty((nPlanets, nFilters, nSNR, 3))
dayEpochBestTime.fill(np.nan)
# Days that are necessary to get the integration time
# (e.g., according to some observing sequence, like OS11)
dayOperationalBestTime = np.empty((nPlanets, nFilters, nSNR, 3))
dayOperationalBestTime.fill(np.nan)
# In the case of OS11, we have that 14 hours out of 24 are dedicated to observing a target
fOperation = 14 / 24
waMasBestTime = np.empty((nPlanets, nFilters, nSNR, 3))
waMasBestTime.fill(np.nan)
fRatioBestTime = np.empty((nPlanets, nFilters, nSNR, 3))
fRatioBestTime.fill(np.nan)
# The integration time depends on the SNR
intTimeBestHours = np.empty((nPlanets, nFilters, nSNR))
intTimeBestHours.fill(np.nan)
for i_pl in np.arange(nPlanets):
for i_flt in np.arange(nFilters):
i_snr_2 = 0
for snr in SNRRefList:
i_snr = int(np.where(np.abs(snr - SNRList) == \
np.min(np.abs(snr - SNRList)))[0][0])
# Finding the shortest integration time
# If all are NaN, skip
if (np.isnan(intTimeFilterHours[i_pl, i_flt, i_snr]).all()) == True:
continue
indBest = np.where(intTimeFilterHours[i_pl, i_flt, i_snr] == np.nanmin(intTimeFilterHours[i_pl, i_flt, i_snr]))
# Verify that the integration time is less than the maximum available
if (indBest[0].size != 0) and \
(intTimeFilterHours[i_pl, i_flt, i_snr, indBest] < intTimeSNRMax[i_pl, i_flt]):
dayEpochBestTime[i_pl, i_flt, i_snr_2, 1] = dayEpochArray[i_pl, indBest]
dayOperationalBestTime[i_pl, i_flt, i_snr_2, 1] = dayEpochArray[i_pl, indBest]
waMasBestTime[i_pl, i_flt, i_snr_2, 1] = waArcsecArray[i_pl, indBest] * 1000 # arcsec to milli-arcsec
fRatioBestTime[i_pl, i_flt, i_snr_2, 1] = fRatioArray[i_pl, i_flt, indBest]
intTimeBestHours[i_pl, i_flt, i_snr_2] = intTimeFilterHours[i_pl, i_flt, i_snr, indBest]
# Filling out the values before/after the best time
dayEpochBestTime[i_pl, i_flt, i_snr_2, 0] = dayEpochBestTime[i_pl, i_flt, i_snr_2, 1] - intTimeBestHours[i_pl, i_flt, i_snr_2] / 24 / 2
# In case the first date is before the mission start
if dayEpochBestTime[i_pl, i_flt, i_snr_2, 0] < 0:
dayEpochBestTime[i_pl, i_flt, i_snr_2, 0] = 0
dayEpochBestTime[i_pl, i_flt, i_snr_2, 2] = dayEpochBestTime[i_pl, i_flt, i_snr_2, 1] + intTimeBestHours[i_pl, i_flt, i_snr_2] / 24
else:
dayEpochBestTime[i_pl, i_flt, i_snr_2, 2] = dayEpochBestTime[i_pl, i_flt, i_snr_2, 1] + intTimeBestHours[i_pl, i_flt, i_snr_2] / 24 / 2
# Operational days have a fudge factor
dayOperationalBestTime[i_pl, i_flt, i_snr_2, 0] = dayEpochBestTime[i_pl, i_flt, i_snr_2, 1] - ( 1 / fOperation) * intTimeBestHours[i_pl, i_flt, i_snr_2] / 24 / 2
# In case the first date is before the mission start
if dayOperationalBestTime[i_pl, i_flt, i_snr_2, 0] < 0:
dayOperationalBestTime[i_pl, i_flt, i_snr_2, 0] = 0
dayOperationalBestTime[i_pl, i_flt, i_snr_2, 2] = dayEpochBestTime[i_pl, i_flt, i_snr_2, 1] + ( 1 / fOperation ) * intTimeBestHours[i_pl, i_flt, i_snr_2] / 24
else:
dayOperationalBestTime[i_pl, i_flt, i_snr_2, 2] = dayEpochBestTime[i_pl, i_flt, i_snr_2, 1] + ( 1 / fOperation ) * intTimeBestHours[i_pl, i_flt, i_snr_2] / 24 / 2
waMasBestTime[i_pl, i_flt, i_snr_2, 0] = np.interp(dayEpochBestTime[i_pl, i_flt, i_snr_2, 0],
dayEpochArray[i_pl,~np.isnan(dayEpochArray[i_pl])],
1000 * waArcsecArray[i_pl,~np.isnan(dayEpochArray[i_pl])])
waMasBestTime[i_pl, i_flt, i_snr_2, 2] = np.interp(dayEpochBestTime[i_pl, i_flt, i_snr_2, 2],
dayEpochArray[i_pl,~np.isnan(dayEpochArray[i_pl])],
1000 * waArcsecArray[i_pl,~np.isnan(dayEpochArray[i_pl])])
fRatioBestTime[i_pl, i_flt, i_snr_2, 0] = np.interp(dayEpochBestTime[i_pl, i_flt, i_snr_2, 0],
dayEpochArray[i_pl,~np.isnan(dayEpochArray[i_pl])],
fRatioArray[i_pl, i_flt, ~np.isnan(dayEpochArray[i_pl])])
fRatioBestTime[i_pl, i_flt, i_snr_2, 2] = np.interp(dayEpochBestTime[i_pl, i_flt, i_snr_2, 2],
dayEpochArray[i_pl, ~np.isnan(dayEpochArray[i_pl])],
fRatioArray[i_pl, i_flt, ~np.isnan(dayEpochArray[i_pl])])
# Update counter of SNR provided by the user
i_snr_2 += 1
# Maximum integration times in hours (used for plotting)
maxIntTimeHours = CGI_Observations['maxIntTimeHours'] # maximum CI allocation time for a single target
# Create the folder where the figures will be stored
dir_figures = './output/figures/'
if os.path.exists(dir_figures) == False:
os.mkdir(dir_figures)
# Selecting the planets with some integration time
indPlanetOK = np.empty(0, dtype=int)
indPlanetOK.fill(np.nan)
# Sentinel
i_pl_OK = 0
for i_pl in np.arange(nPlanets):
if np.isnan(intTimeFilterHours[i_pl]).all() == False:
indPlanetOK = np.append( indPlanetOK, i_pl_OK )
i_pl_OK += 1
# Number of planets with some finite integration times.
nPlanetOK = len(indPlanetOK)
###################################
# Store the results in a CVS file #
###################################
store_csv_file_rv(filterList, kppList, PName,
dayEpochBestTime, waMasBestTime, fRatioBestTime,
SNRRefList, intTimeBestHours, SNRPlanetMax,
intTimeSNRMax, csvFileName)
###############################################
# Plotting the results for the most favorable #
# time without taking into account the #
# accessibility of each target #
###############################################
# Useful to extract elements from a list (https://code.activestate.com/recipes/577953-get-multiple-elements-from-a-list/)
getVar = lambda searchList, ind: [searchList[i] for i in ind]
# Turn off to stop plotting the results
# 10/28/21: coming soon
if CGI_Observations['bar_plot'].lower() == 'yes':
# Font size
fontSize = 18
# Number of planets per plot
nPlanetPlot = np.min([6,nPlanetOK])
# Number of plots
nPlots =
|
np.ceil(nPlanetOK / nPlanetPlot)
|
numpy.ceil
|
import numpy as np
from pyDOE import lhs # The experimental design package for python; Latin Hypercube Sampling (LHS)
# import sobol_seq # require https://pypi.org/project/sobol_seq/
# Generate coordinates vector for uniform grids over a 2D rectangles. Order starts from left-bottom, row-wise, to right-up
def rectspace(a,b,c,d,nx,ny):
x = np.linspace(a,b,nx)
y = np.linspace(c,d,ny)
[X,Y] = np.meshgrid(x,y)
Xm = np.concatenate([X.reshape((-1, 1)), Y.reshape((-1, 1))], axis=1)
return Xm
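# Example: rectspace(0, 1, 0, 1, 2, 2) returns [[0, 0], [1, 0], [0, 1], [1, 1]],
# i.e. left-bottom first, proceeding row-wise to right-up.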
def rectspace_dis(lb,ub,N,len_ratio=None,adjust=None,rand_rate=None):
if len_ratio:
ny = np.sqrt(N/len_ratio).astype(int)
nx = (N/ny).astype(int)
N_new = nx * ny
else:
ny = np.sqrt(N/2).astype(int)
nx = (N/ny).astype(int)
N_new = nx * ny
a, b, c, d = lb[0], ub[0],lb[1],ub[1]
if adjust:
a = a + adjust
b = b - adjust
c = c + adjust
d = d - adjust
Xm = rectspace(a,b,c,d,nx,ny)
if rand_rate:
Xm[:,0:1] = Xm[:,0:1] + np.random.normal(0,rand_rate,(N_new,1))
Xm[:,1:2] = Xm[:,1:2] + np.random.normal(0,rand_rate,(N_new,1))
return Xm, N_new
# def rect_sob_Dis2d(Nx,Ny,N_k,ny0=None,rand_rate=None):
# # Quasi-random: i4_sobol_generate
# if ny0:
# ny = ny0
# else:
# ny = np.sqrt(N_k/2).astype(int)
# nx = (N_k/ny).astype(int)
# if nx*ny != N_k:
# print('Error: nx*ny not equal to N_k, reset N_k')
# N_k = nx*ny
# sob_k = sobol_seq.i4_sobol_generate(2,Nx*Ny) # Generate a 2-Dim Vecotr
# # array = sobol_seq.i4_sobol_generate(2, 10)
# XX, YY = np.meshgrid(sob_k[0:nx,0],sob_k[0:ny,1])
# Xm_k = np.zeros((N_k,2))
# if rand_rate:
# Xm_k[:,0:1] = XX.reshape((nx*ny,-1)) + np.random.normal(0,rand_rate,(nx*ny,1))
# Xm_k[:,1:2] = YY.reshape((nx*ny,-1)) + np.random.normal(0,rand_rate,(nx*ny,1))
# else:
# Xm_k[:,0:1] = XX.reshape((nx*ny,-1))
# Xm_k[:,1:2] = YY.reshape((nx*ny,-1))
# return Xm_k, N_k
# def rect_sob_DisIndex(Nx,Ny,N_k,ny0=None):
# if ny0:
# ny = ny0
# else:
# ny = np.sqrt(N_k/2).astype(int)
# nx = (N_k/ny).astype(int)
# sob_k = sobol_seq.i4_sobol_generate(2,Nx*Ny)
# ldx_k = (sob_k[0:nx,0] * Nx).astype(int)
# ldy_k = (sob_k[0:ny,1] * Ny).astype(int)
# idx_k = []
# for i_loop in ldy_k:
# for j_loop in ldx_k:
# idx_i = i_loop * Nx + j_loop
# idx_k.append(idx_i)
# return idx_k
def rect_PartitionedDisUni2d(N,Ns,Np_x,Np_y,rand_rate=None):
# For [0,1] x [0,1], partitioned into Np_x * Np_y subdomains; randomly select locations from these subdomains
num_par = Np_x * Np_y
Ns_k = Ns // num_par
Ns_res = Ns % num_par
Index_Ns = (Ns_k * np.ones(num_par)).astype(int)
# idx = np.random.choice(num_par,Ns_res,replace=False)
Index_Ns[0:Ns_res] += 1
xm = np.linspace(0, 1, Np_x+1)
ym = np.linspace(0, 1, Np_y+1)
XM =
|
np.zeros((Ns,2))
|
numpy.zeros
|
# Copyright (c) 2008 Carnegie Mellon University
#
# You may copy and modify this freely under the same terms as
# Sphinx-III
"""
Train generic Gaussian Mixture Models from speech data.
This module defines a GMM class which can be used to train generic
models of speech for use in speaker identification or VTLN.
"""
__author__ = "<NAME> <<EMAIL>>"
__version__ = "$Revision: 10058 $"
import sys
import os
import s3gau
import s3mixw
import numpy
def logadd(x,y):
"""Log-add two numbers."""
return x + numpy.log(1 + numpy.exp(y-x))
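# e.g. logadd(numpy.log(0.3), numpy.log(0.2)) equals numpy.log(0.5) up to
# floating-point rounding, since logadd(x, y) = log(exp(x) + exp(y)).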
class GMM(object):
"""
Class representing a Gaussian Mixture Model.
"""
def __init__(self, fromdir=None,
featlen=13, ndensity=256,
mixwfloor=0.001, varfloor=0.001):
"""
Constructor for GMM class.
@param fromdir: Directory to read initial parameters from.
@ptype fromdir: string
@param featlen: Dimensionality of input features.
@ptype featlen: int
@param ndensity: Number of Gaussian components.
@ptype ndensity: int
@param varfloor: Floor value to apply to variances before evaluation.
@ptype varfloor: float
@param mixwfloor: Floor value to apply to mixture weights before evaluation.
@ptype mixwfloor: float
"""
if fromdir != None:
self.read(fromdir)
else:
self.random_init(featlen, ndensity)
self.varfloor = varfloor
self.mixwfloor = mixwfloor
self.precompute()
self.reset()
def read(self, fromdir):
"""
Read GMM parameters from files in a directory.
@param fromdir: Directory to read parameters from. The files
'means', 'variances', and 'mixture_weights' will be read from
this directory.
@ptype fromdir: string
"""
self.means = s3gau.open(os.path.join(fromdir, "means"))
self.variances = s3gau.open(os.path.join(fromdir, "variances"))
self.mixw = s3mixw.open(os.path.join(fromdir, "mixture_weights"))
self.featlen = self.means.veclen[0]
self.ndensity = self.means.density
def write(self, todir):
"""
Write GMM parameters to files in a directory.
@param todir: Directory to read parameters from. The files
'means', 'variances', and 'mixture_weights' will be created in
this directory.
@ptype todir: string
"""
s3gau.open(os.path.join(todir, "means"),'wb').writeall([[self.means]])
s3gau.open(os.path.join(todir, "variances"),'wb').writeall([[self.variances]])
s3mixw.open(os.path.join(todir, "mixture_weights"),'wb').writeall \
(self.mixw[numpy.newaxis,numpy.newaxis,:])
def random_init(self, featlen=13, ndensity=256):
"""
Initialize parameters with arbitrary initial values.
"""
self.means = numpy.random.random((ndensity, featlen)) * 10 - 5
self.variances = numpy.ones((ndensity,featlen))
self.mixw = numpy.random.random(ndensity)
self.mixw /= self.mixw.sum()
self.featlen = featlen
self.ndensity = ndensity
def precompute(self):
"""
Precompute Gaussian invariants for density calculation.
"""
variances = self.variances.clip(self.varfloor, numpy.inf)
mixw = self.mixw.clip(self.mixwfloor, numpy.inf)
self.inv_var = 0.5/variances
self.log_det_var = (numpy.log(mixw) - # mixw * 1 /
0.5 * # sqrt
(self.featlen * numpy.log(2 * numpy.pi) # 2pi ** featlen
+ numpy.log(variances).sum(1))) # prod(v for v in variances)
def reset(self):
"""
Reset internal accumulators.
"""
self.mixwacc = numpy.zeros(self.ndensity, 'd')
self.meanacc =
|
numpy.zeros((self.ndensity,self.featlen), 'd')
|
numpy.zeros
|
# -*- coding: utf-8 -*-
"""
Functions required for the Darcy-type liquid water flow scheme
@author: Vincent
"""
import numpy as np
def hydrconducsat_Calonne(rad,rho):
'''Saturated hydraulic conductivity of Calonne et al. Eq. (6)'''
mu = 0.001792 #dynamic viscosity of water at 273.15K [kg m-1 s-1]
bigksat = 3*(rad)**2 * 1000*9.81/mu * np.exp(-0.013*rho) #[m s-1]
return(bigksat)
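# Example usage (illustrative values only, not from any dataset):
# bigksat = hydrconducsat_Calonne(rad=0.5e-3, rho=400.0)  # grain radius [m], snow density [kg m-3]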
def vG_Yama(rad,rho,thetaeff):
'''Pressure head and relative hydraulic conductivity computations
from the van Genuchten (1980) model with the Yamaguchi et al. (2012)
parameterisation'''
alpha = 4.4e6*(rho/(2*rad))**(-0.98) #alpha parameter, Yamaguchi 2012 Eq.(6)
n = 1+2.7e-3*(rho/(2*rad))**0.61 #n parameter, Yamaguchi 2012 Eq. (7)
m = 1-(1/n) #m parameter, Yamaguchi 2012 p.7
head = 1/alpha * (thetaeff**(-1/m)-1)**(1/n) #head pressure, Hirashima 2010 (9)
bigkrel = thetaeff**(1/2) * (1-(1-thetaeff**(1/m))**m)**2 # Hirashima 2010 (10)
return(head,bigkrel)
def thetae_update(absfl,th_i,th_s,LWC,dz):
'''Updates effective saturation for a given total water flux absfl [m]'''
lw_in = np.append(0,absfl)
lw_out = np.append(absfl,0)
th_w = (LWC+lw_in-lw_out)/dz
th_e = (th_w-th_i)/(th_s-th_i) #effective water saturation, Hirashima 2010 (5)
stab_e = 1e-9 #stabilisation theta_e
th_e = np.maximum(stab_e,th_e) #avoid negative effective saturation
th_e = np.minimum(1-stab_e,th_e) #avoid effective saturation equal to 1
return(th_e)
def thetaeff_equaliser(th_i2,th_s2,LWC2,dz2):
'''
Computes the total flow needed to equalise saturation between neighbouring nodes
All input arrays must be of two elements
'''
th_w = LWC2/dz2
th_e0 = (th_w-th_i2)/(th_s2-th_i2) #effective water saturation, Hirashima 2010 (5)
# lwflux from index[0] to index[1] ensures equal saturation between both volumes #
lwflux = ((dz2[0]*(th_s2[0]-th_i2[0]))**(-1)+(dz2[1]*(th_s2[1]-th_i2[1]))**(-1))**(-1) * (th_e0[0]-th_e0[1])
return(lwflux)
def vG_Yama_params(rad,rho):
'''Computes the van Genuchten parameters following the parameterisation
of Yamaguchi et al. (2012)'''
alpha = 4.4e6*(rho/(2*rad))**(-0.98) #alpha parameter, Yamaguchi 2012 Eq.(6)
n = 1+2.7e-3*(rho/(2*rad))**0.61 #n parameter, Yamaguchi 2012 Eq. (7)
m = 1-(1/n) #m parameter, Yamaguchi 2012 p.7
return(alpha,n,m)
def phead_vG(alpha,n,m,thetaeff):
'''Computes pressure head according to the van Genuchten model'''
head = 1/alpha * (thetaeff**(-1/m)-1)**(1/n) #head pressure, Hirashima 2014 (3)
return(head)
def krel_vG(m,thetaeff):
'''Computes relative hydraulic conductivity according to the van Genuchten model'''
bigkrel = thetaeff**(1/2) * (1-(1-thetaeff**(1/m))**m)**2 # Hirashima 2010 (10)
return(bigkrel)
def dfdg_derivative(th_sfull,th_ifull,th_efull,alphafull,nfull,mfull,dzfull):
'''
Computes the derivative of the equilibrium variable (f_eq, defined by setting all terms
of Eq.(20) of Hirashima et al. (2010) on the right-hand-side) with respect to the water
flux at the interface of two neighbouring nodes
'''
th_s,th_i,th_e = th_sfull[0:-1],th_ifull[0:-1],th_efull[0:-1]
alpha,n,m,dz = alphafull[0:-1],nfull[0:-1],mfull[0:-1],dzfull[0:-1]
th_sd,th_id,th_ed = th_sfull[1:],th_ifull[1:],th_efull[1:]
alphad,nd,md,dzd = alphafull[1:],nfull[1:],mfull[1:],dzfull[1:]
dfdg = 1/((th_s-th_i)*alpha*n*m*dz) * th_e**(-1*(1+1/m)) * (th_e**(-1/m)-1)**((1-n)/n) + \
1/((th_sd-th_id)*alphad*nd*md*dzd) * th_ed**(-1*(1+1/md)) * (th_ed**(-1/md)-1)**((1-nd)/nd)
return(dfdg)
def flux_bisection(gc,LWCav,glwcacm,th_i,th_s,lwc,dz,avG,nvG,mvG,eps_cvg):
'''
Bisection algorithm to find guess of water flux (gc) between two neighbouring nodes that brings
head pressures close to equilibrium (f_eq, defined by setting all terms of Eq.(20) of Hirashima
et al. (2010) on the right-hand-side)
'''
bisitmax = 100 #maximum number of iteration for bisection algorithm
bisit = 0 #iteration number for bisection algorithm
cvg_bis = False #convergence criterion for Bisection
dltz = 1/2*sum(dz) #distance between the centres of the two nodes
gth_e = thetae_update(gc,th_i,th_s,lwc,dz)
ghd = phead_vG(avG,nvG,mvG,gth_e) #pressure head [m]
f_eq = ghd[0]-ghd[1]-dltz #Hirashima 2010 Eq.(20) evaluated at interface
# Initialise bisection bounds #
g0 = 0. #lower bisection bound
g1 = min(LWCav[0],glwcacm[1]) #upper bisection bound (limited by lwc available and pore space available)
while (cvg_bis==False and bisit<bisitmax): #start Bisection (if cvg_bis is False)
gprev0 = np.copy(gc) #flux guess at previous iteration
if f_eq<0: #hd[0] too low -> increase outgoing flux
g0 =
|
np.copy(gc)
|
numpy.copy
|
# authors_name = '<NAME>'
# project_title = 'Multi Sensor-based Human Activity Recognition using OpenCV and Sensor Fusion'
# email = '<EMAIL>'
import numpy as np
import os
import pandas as pd
import itertools
import logging
import sklearn.pipeline
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import ParameterGrid
logging.getLogger('sklearn').setLevel(logging.FATAL)
def list_combinations_generator(modalities: list):
"""Generates combinations for items in the given list.
Args:
modalities: List of modalities available in the dataset.
Returns:
Combinations of items in the given list.
"""
modality_combinations = list()
# Iterates across modalities to generate combinations based on length.
for length in range(1, len(modalities) + 1):
# Generate combinations for the current length.
current_length_combinations = itertools.combinations(modalities, length)
# Iterates across the generated combinations to convert it into a list.
for combination in current_length_combinations:
current_combination_list = list()
for k in combination:
current_combination_list.append(k)
modality_combinations.append(current_combination_list)
return modality_combinations
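# Example (modality names are illustrative): list_combinations_generator(['rgb', 'depth'])
# returns [['rgb'], ['depth'], ['rgb', 'depth']].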
def data_combiner(n_actions: int,
subject_ids: list,
n_takes: int,
modalities: list,
skeleton_pose_model: str):
"""Combines skeleton point information for all actions, all takes, given list of subject ids and given list of
modalities.
Args:
n_actions: Total number of actions in the original dataset.
subject_ids: List of subjects in the current set.
n_takes: Total number of takes in the original dataset.
modalities: Current combination of modalities.
skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point
information.
Returns:
A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list
of subject ids and given list of modalities.
"""
combined_modality_skeleton_information = pd.DataFrame()
# Iterates across actions, subject_ids, takes, and modalities to combine skeleton point information.
for i in range(1, n_actions + 1):
for j in range(len(subject_ids)):
for k in range(1, n_takes + 1):
data_name = 'a{}_s{}_t{}'.format(i, subject_ids[j], k)
# Iterates across modalities to import skeleton point information file and adds it to
# combined_modality_skeleton_information. If file not found, it moves on to the next combination.
try:
# Imports 1st modality's skeleton point information for current data_name and skeleton_pose_model.
current_data_name_modality_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[0], data_name, skeleton_pose_model))
except FileNotFoundError:
continue
# Since, length of modalities in each combination is different. Hence, if length of modalities is
# greater than 1, then the imported skeleton point information for other modalities will be merged to
# the skeleton point information for the first modality.
if len(modalities) != 1:
for m in range(1, len(modalities)):
current_skeleton_point_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[m], data_name, skeleton_pose_model))
current_data_name_modality_information = pd.merge(current_data_name_modality_information,
current_skeleton_point_information,
on='frame', how='outer')
# Adds data_name to the imported skeleton point information.
current_data_name_modality_information['data_name'] = [data_name for _ in range(len(
current_data_name_modality_information))]
# Removes frame column from the imported skeleton point information.
current_data_name_modality_information = current_data_name_modality_information.drop(columns=['frame'])
# Adds action column to the imported skeleton point information.
current_data_name_modality_information['action'] = [i for _ in range(len(
current_data_name_modality_information))]
# Appends currently imported & modified skeleton point information to the combined modality skeleton
# point information
combined_modality_skeleton_information = combined_modality_skeleton_information.append(
current_data_name_modality_information)
return combined_modality_skeleton_information
def calculate_metrics(actual_values: np.ndarray,
predicted_values: np.ndarray):
"""Using actual_values, predicted_values calculates metrics such as accuracy, balanced accuracy, precision, recall,
and f1 scores.
Args:
actual_values: Actual action labels in the dataset
predicted_values: Action labels predicted by the currently trained model
Returns:
Dictionary contains keys as score names and values as scores which are floating point values.
"""
return {'accuracy_score': round(accuracy_score(actual_values, predicted_values) * 100, 3),
'balanced_accuracy_score': round(balanced_accuracy_score(actual_values, predicted_values) * 100, 3),
'precision_score': round(precision_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'recall_score': round(recall_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'f1_score': round(f1_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3)}
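# Hedged usage sketch (not part of the original module): calls calculate_metrics on a
# small illustrative pair of label arrays; assumes the sklearn metric functions used
# inside calculate_metrics are imported at module level.
def example_calculate_metrics():
    """Returns the metric dictionary for a tiny, made-up set of action labels."""
    actual = np.array([1, 1, 2, 2, 3, 3])
    predicted = np.array([1, 2, 2, 2, 3, 3])
    return calculate_metrics(actual, predicted)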
def retrieve_hyperparameters(current_model_name: str):
"""Based on the current_model_name returns a list of hyperparameters used for optimizing the model (if necessary).
Args:
current_model_name: Name of the model currently expected to be trained
Returns:
A dictionary containing the hyperparameter name and the values that will be used to optimize the model
"""
# For support_vector_classifier, the hyperparameter tuned is kernel.
if current_model_name == 'support_vector_classifier':
parameters = {'kernel': ['linear', 'poly', 'rbf']}
# For decision_tree_classifier, the hyperparameters tuned are criterion, splitter, and max_depth.
elif current_model_name == 'decision_tree_classifier':
parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2, 3, 4, 5, 6, 7]}
# For random_forest_classifier or extra_trees_classifier, the hyperparameters tuned are n_estimators, criterion, and
# max_depth
elif current_model_name == 'random_forest_classifier' or current_model_name == 'extra_trees_classifier':
parameters = {'n_estimators': [i * 10 for i in range(2, 11, 2)], 'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5, 6, 7]}
# For gradient_boosting_classifier, the hyperparameters tuned are loss, n_estimators, criterion, and max_depth.
elif current_model_name == 'gradient_boosting_classifier':
parameters = {'max_depth': [2, 3, 4, 5, 6, 7], 'n_estimators': [i * 10 for i in range(2, 11, 2)]}
# For gaussian_naive_bayes, none of the hyperparameters are tuned.
else:
parameters = {'None': ['None']}
return parameters
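# Hedged usage sketch (not part of the original module): the dictionary returned by
# retrieve_hyperparameters is shaped for sklearn's GridSearchCV. The model choice and
# cv value below are illustrative assumptions; train_input / train_target could come
# from split_data_input_target defined below.
def example_grid_search(train_input, train_target):
    """Runs a grid search over the support vector classifier hyperparameters."""
    from sklearn.model_selection import GridSearchCV
    from sklearn.svm import SVC
    parameters = retrieve_hyperparameters('support_vector_classifier')
    grid_search = GridSearchCV(SVC(), parameters, cv=5)
    grid_search.fit(train_input, train_target)
    return grid_search.best_params_, grid_search.best_score_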
def split_data_input_target(skeleton_data: pd.DataFrame):
"""Splits skeleton_data into input and target datasets by filtering / selecting certain columns.
Args:
skeleton_data: Train / Validation / Test dataset used to split / filter certain columns.
Returns:
A tuple containing 2 numpy ndarrays for the input and target datasets.
"""
skeleton_data_input = skeleton_data.drop(columns=['data_name', 'action'])
skeleton_data_target = skeleton_data['action']
    return np.array(skeleton_data_input), np.array(skeleton_data_target)
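# Hedged usage sketch (not part of the original module): builds a tiny frame with the
# identifier columns split_data_input_target drops ('data_name', 'action') plus two
# illustrative skeleton point columns, then splits it into input and target arrays.
def example_split_data_input_target():
    """Returns the (input, target) arrays for a two-row illustrative dataframe."""
    frame = pd.DataFrame({'data_name': ['a1_s1_t1', 'a1_s1_t1'],
                          'action': [1, 1],
                          'joint_1_x': [0.12, 0.15],
                          'joint_1_y': [0.34, 0.31]})
    return split_data_input_target(frame)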
from __future__ import division, print_function
import numpy as np
from astropy.stats.funcs import median_absolute_deviation as MAD
from scipy.ndimage import label
from martinsff import martinsff
import extract_lc
from photo_test import raw_moment, intertial_axis, plot_bars
from ktransit import FitTransit, LCModel
import ktransit
from planet_params import *
def get_label_im(fluxarr,bg_cut):
fbg = bg_sub(fluxarr)
flatimx = np.nanmedian(fbg,axis=0)
vals = flatimx[np.isfinite(flatimx)].flatten()
mad_cut = 1.4826 * MAD(vals) * bg_cut
flatimx[np.isnan(flatimx)] = 0.
region = np.where(flatimx > mad_cut,1,0)
lab = label(region)[0]
#find the central pixel
imshape = np.shape(flatimx)
centralpix = [1+imshape[0] // 2,1+imshape[1] // 2]
regnum = lab[centralpix[0],centralpix[1]]
labim = np.where(lab == regnum, 1, 0)
return labim
def bg_sub(fla,smear=True):
"""
subtract the background from a series of images
by assuming the aperture is large enough to be
predominantly background
"""
    for i in range(np.shape(fla)[0]):
if smear:
fla[i,:,:] = fla[i,:,:] - np.nanmedian(fla[i,:,:], axis=0)
else:
fla[i,:,:] = fla[i,:,:] - np.nanmedian(fla[i,:,:])
return fla
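# Hedged usage sketch (not part of the original module): bg_sub expects a
# (n_cadences, ny, nx) flux cube; the synthetic cube below is illustrative only.
def example_bg_sub():
    """Background-subtracts a small synthetic cube with one bright pixel."""
    cube = np.random.normal(loc=100.0, scale=1.0, size=(10, 5, 5))
    cube[:, 2, 2] += 500.0  # a bright "target" pixel sitting on the flat background
    return bg_sub(cube, smear=False)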
def optimalAperture(t_time, t_fluxarr, t_quality, qual_cut=False, return_qual=False, toss_resat=False, bg_cut=5, skip=0):
"""
This routine determines an optimal apertures and outputs the flux (i.e. a light curve) from a TPF.
Inputs:
------------
t_time = 1D array of 'TIME' from TPF
    t_fluxarr = 3D array of 'FLUX' from TPF (one image per cadence)
    t_quality = 1D array of 'QUALITY' from TPF
    qual_cut = exclude cadences with a non-zero quality flag; this is False by default
    return_qual = if True then nothing is returned; this is False by default
    toss_resat = exclude cadences where there is a wheel resaturation event; this is False by default
bg_cut = threshold to find pixels that are bg_cut * MAD above the median
skip = index of first cadence that should be used in the time series
Outputs:
------------
time = 1D array of time in BKJD
lc = 1D array of flux measured in optimal aperture
xbar = 1D array of x-coordinate of target centroid
ybar = 1D array of y-coordinate of target centroid
    regnum = integer label of the aperture region containing the brightest pixel
lab = 2D array identifying pixels used in aperture
Usage:
------------
tpf,tpf_hdr = ar.getLongTpf(k2id, campaign, header=True)
tpf_time = tpf['TIME']
tpf_flux = tpf['FLUX']
tpf_quality = tpf['QUALITY']
time,lc,xbar,ybar,regnum,lab = optimalAperture(tpf_time, tpf_flux, tpf_quality, qual_cut=False, return_qual=False, toss_resat=True, bg_cut=5, skip=0)
"""
time = t_time[skip:]
fluxarr = t_fluxarr[skip:]
quality = t_quality[skip:]
if qual_cut:
time = time[quality == 0]
fluxarr = fluxarr[quality == 0,:,:]
elif toss_resat:
# cadences where there is a wheel resaturation event
time = time[quality != 32800]
fluxarr = fluxarr[quality != 32800,:,:]
#remove any nans
try:
fluxarr[fluxarr == 0] = np.nan
except ValueError:
pass
#subtract background
flux_b = bg_sub(fluxarr)
# create a median image to calculate where the pixels to use are
flatim = np.nanmedian(flux_b,axis=0)
#find pixels that are X MAD above the median
vals = flatim[np.isfinite(flatim)].flatten()
mad_cut = 1.4826 * MAD(vals) * bg_cut
flatim[np.isnan(flatim)] = 0.
region = np.where(flatim > mad_cut,1,0)
lab = label(region)[0]
#find the central pixel
imshape = np.shape(flatim)
centralpix = [1+imshape[0] // 2,1+imshape[1] // 2]
#find brightest pix within 9x9 of central pix
#this assumes target is at center of postage stamp which I think is ok
centflatim = flatim[centralpix[0]-2:centralpix[0]+2,
centralpix[1]-2:centralpix[1]+2]
flatimfix = np.where(np.isfinite(centflatim),centflatim,0)
brightestpix = np.unravel_index(flatimfix.argmax(), centflatim.shape)
bpixy, bpixx = brightestpix
#use all pixels in the postage stamp that are X MAD above the median
#this identifies location of brightest pixel only
regnum = lab[centralpix[0]-2+bpixy,centralpix[1]-2+bpixx]
lc = np.zeros_like(time)
xbar = np.zeros_like(time)
ybar = np.zeros_like(time)
#make a rectangular aperture for the moments thing
ymin = np.min(np.where(lab == regnum)[0])
ymax = np.max(np.where(lab == regnum)[0])
xmin = np.min(np.where(lab == regnum)[1])
xmax = np.max(np.where(lab == regnum)[1])
momlims = [ymin,ymax+1,xmin,xmax+1]
#loop that performs the aperture photometry
for i,fl in enumerate(flux_b):
lc[i] = np.sum(fl[lab == regnum])
#lc[i] = np.sum(fl[np.where(lab == 1)]
momim = fl[momlims[0]:momlims[1],
momlims[2]:momlims[3]]
        momim[~np.isfinite(momim)] = 0.0  # zero out non-finite pixels before computing moments
xbar[i], ybar[i], cov = intertial_axis(momim)
xbar[~np.isfinite(xbar) | ~np.isfinite(ybar)] = np.nan
ybar[~np.isfinite(xbar) | ~np.isfinite(ybar)] = np.nan
if return_qual:
return None
else:
# TODO: think about whether this should be normalized
return (time,lc, xbar - np.nanmean(xbar), ybar - np.nanmean(ybar), regnum, lab)
def get_lc(time1, fluxarr1, quality1, n_chunks, bg_cut, flatlc_window, smooth_window):
time, lc, xbar, ybar, regnum, lab = optimalAperture(
time1, fluxarr1, quality1, qual_cut=False, return_qual=False,
toss_resat=False, bg_cut=bg_cut, skip=None)
m1 = np.isfinite(lc) * np.isfinite(lc)
time = time1[m1][quality1[m1] == 0]
lc = lc[m1][quality1[m1] == 0]
xbar = xbar[m1][quality1[m1] == 0]
ybar = ybar[m1][quality1[m1] == 0]
flatlc = extract_lc.medfilt(time,lc,window=flatlc_window)
    cadstep = int(np.floor(len(time) / n_chunks)) #600
zpt = len(time) % cadstep
if zpt==cadstep:
zpt = 0
outflux, correction, thr_cad = extract_lc.run_C0_detrend(
time, flatlc, xbar, ybar, cadstep=cadstep, skip=None)
not_thr = ~thr_cad
corflux = (lc[zpt:][not_thr]/
np.median(lc[zpt:][not_thr])/
correction[not_thr])
corflatflux = (flatlc[zpt:][not_thr]/
np.median(flatlc[zpt:][not_thr])/
correction[not_thr])
# The 1.4826 and *4 factors make this similar to a 4-sigma cut.
mad_cut = 1.4826*MAD(corflatflux-1.) * 4.0
keep = np.abs(corflatflux-1.) < mad_cut # this might not be used
m2 = np.ones_like(corflatflux,dtype=bool)#corflatflux < 1.1
t1 = time[zpt:][not_thr][m2]
cfflux = extract_lc.medfilt(t1,corflux,window=smooth_window) - 1.0
return time, lc, xbar, ybar, t1, corflux, cfflux
def transit_fit(t1, cfflux, cadence='long', rho=50.0):
addtime = 4833 # convert from trappist time to kepler time
    time = t1[(cfflux < 0.025) * (cfflux > -0.025)] + addtime # you need a time and a flux
flux = cfflux[(cfflux < 0.025) * (cfflux > -0.025)] # there are no transits here :(
ferr = np.ones_like(time) * 0.001 # uncertainty on the data
fitT = FitTransit()
fitT.add_guess_star(rho=rho, ld1 = 1.0181, ld2 = -0.0404) # fixed because I'm sleepy
for planet in [planetb, planetc, planetd, planete, planetf, planetg, planeth]:
fitT.add_guess_planet(
period=planet['period_days'][0], impact=planet['impact'][0],
T0=planet['t0'][0], rprs=(planet['td_percent'][0]/100)**0.5)
if cadence == 'long':
fitT.add_data(time=time, flux=flux, ferr=ferr,
itime=np.ones_like(time) * 0.0188)
elif cadence == 'short':
fitT.add_data(time=time, flux=flux, ferr=ferr,
itime=np.ones_like(time) * 0.0188 / 30)
vary_star = ['rho', 'zpt'] # free stellar parameters
vary_planet = (['period', # free planetary parameters
'T0', 'impact',
'rprs']) # free planet parameters are the same for every planet you model
fitT.free_parameters(vary_star, vary_planet)
fitT.do_fit() # run the fitting
return time, flux, fitT
def get_qf(time,flux,epoch,period,transitmodel=None):
date1 = (time - epoch) + 0.5*period
phi1 = (((date1 / period) - np.floor(date1/period)) * period) - 0.5*period
q1 = np.sort(phi1)
    f1 = flux[np.argsort(phi1)]
# -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame, Categorical, Series
def __convert_to_df(a, val_col, group_col):
if isinstance(a, DataFrame):
x = a.copy()
if not all([group_col, val_col]):
raise ValueError('group_col, val_col must be explicitly specified')
else:
x = np.array(a)
if not all([group_col, val_col]):
try:
groups = np.array([len(a) * [i + 1] for i, a in enumerate(x)])
groups = sum(groups.tolist(), [])
x = sum(x.tolist(), [])
x = np.column_stack([x, groups])
val_col = 0
group_col = 1
except:
raise ValueError('array cannot be processed, provide val_col and group_col args')
x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
x.rename(columns={group_col: 'groups', val_col: 'y'}, inplace=True)
group_col = 'groups'
val_col = 'y'
return x
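# Hedged usage sketch (not part of the original module): shows the long-format frame
# that __convert_to_df builds from a plain list of equal-length groups when no column
# names are given; the numbers are illustrative only.
def _example_convert_to_df():
    """Returns a dataframe with columns 'y' (values) and 'groups' (group labels)."""
    x = [[1, 2, 3], [12, 31, 54], [10, 12, 6]]
    return __convert_to_df(x, val_col=None, group_col=None)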
def __convert_to_block_df(a, y_col, group_col, block_col, melted):
if isinstance(a, DataFrame) and not melted:
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
x = a.melt(id_vars=block_col, var_name=group_col, value_name=y_col)
elif melted:
x = DataFrame.from_dict({'groups': a[group_col],
'blocks': a[block_col],
'y': a[y_col]})
elif not isinstance(a, DataFrame):
x = np.array(a)
x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
if not melted:
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
x.columns.name = group_col
x.index.name = block_col
x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
else:
            x.rename(columns={group_col: 'groups', block_col: 'blocks', y_col: 'y'}, inplace=True)
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
return x, 'y', 'groups', 'blocks'
def posthoc_conover(a, val_col = None, group_col = None, p_adjust = None, sort = True):
'''
Post-hoc pairwise test for multiple comparisons of mean rank sums
(Conover's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains values.
group_col : str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
Numpy ndarray or pandas DataFrame of p values depending on input
data type.
Notes
-----
    A tie correction is employed according to Conover [1]_.
References
----------
.. [1] <NAME> and <NAME> (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_conover(x, p_adjust = 'holm')
array([ [-1. , 0.00119517, 0.00278329],
[ 0.00119517, -1. , 0.18672227],
[ 0.00278329, 0.18672227, -1. ]])
'''
def compare_conover(i, j):
diff = np.abs(x_ranks_avg[i] - x_ranks_avg[j])
B = (1. / x_lens[i] + 1. / x_lens[j])
D = (x_len_overall - 1. - H) / (x_len_overall - x_len)
t_value = diff / np.sqrt(S2 * B * D)
p_value = 2. * ss.t.sf(np.abs(t_value), df = x_len_overall - x_len)
return p_value
def get_ties(x):
x_sorted = np.array(np.sort(x))
tie_sum = 0
pos = 0
while pos < x_len_overall:
n_ties = len(x_sorted[x_sorted == x_sorted[pos]])
pos = pos + n_ties
if n_ties > 1:
tie_sum += n_ties ** 3. - n_ties
c = np.min([1., 1. - tie_sum / (x_len_overall ** 3. - x_len_overall)])
return c
if isinstance(a, DataFrame):
x = a.copy()
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
x_groups_unique = x[group_col].unique()
x_len = x_groups_unique.size
x_lens = x.groupby(by=group_col)[val_col].count().values
x_flat = x[val_col].values
x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
x_grouped = np.array([x_flat[j:j + x_lens[i]] for i, j in enumerate(x_lens_cumsum)])
else:
x = np.array(a)
x_grouped = np.array([np.asarray(a)[~np.isnan(a)] for a in x])
x_flat = np.concatenate(x_grouped)
x_len = len(x_grouped)
x_lens = np.asarray([len(a) for a in x_grouped])
x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
x_len_overall = len(x_flat)
if any(x_lens == 0):
raise ValueError("All groups must contain data")
x_ranks = ss.rankdata(x_flat)
x_ranks_grouped = np.array([x_ranks[j:j + x_lens[i]] for i, j in enumerate(x_lens_cumsum)])
x_ranks_avg = [np.mean(z) for z in x_ranks_grouped]
x_ties = get_ties(x_ranks) #ss.tiecorrect(x_ranks)
H = ss.kruskal(*x_grouped)[0]
if x_ties == 1:
S2 = x_len_overall * (x_len_overall + 1.) / 12.
else:
S2 = (1. / (x_len_overall - 1.)) * (np.sum(x_ranks ** 2.) - (x_len_overall * (((x_len_overall + 1.)**2.) / 4.)))
    vs = np.zeros((x_len, x_len), dtype=float)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i,j in combs:
vs[i, j] = compare_conover(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
if isinstance(x, DataFrame):
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
else:
return vs
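# Hedged usage sketch (not part of the original module): the same test driven from a
# long-format DataFrame with explicit value/group columns and Holm adjustment; the
# values are illustrative only.
def _example_posthoc_conover_df():
    """Returns the pairwise Conover p-value matrix for a small long-format frame."""
    df = DataFrame({'y': [1, 2, 3, 5, 1, 12, 31, 54, 27, 9, 10, 12, 6, 74, 11],
                    'groups': ['a'] * 5 + ['b'] * 5 + ['c'] * 5})
    return posthoc_conover(df, val_col='y', group_col='groups', p_adjust='holm')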
def posthoc_dunn(a, val_col = None, group_col = None, p_adjust = None, sort = True):
'''
Post-hoc pairwise test for multiple comparisons of mean rank sums
(Dunn's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains values.
group_col : str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
Numpy ndarray or pandas DataFrame of p values depending on input
data type.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] <NAME> (1964). Multiple comparisons using rank sums.
Technometrics, 6, 241-252.
.. [2] <NAME> (2012), Primer of Biostatistics. New York: McGraw Hill.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_dunn(x, p_adjust = 'holm')
array([[-1. 0.01764845 0.04131415]
[ 0.01764845 -1. 0.45319956]
[ 0.04131415 0.45319956 -1. ]])
'''
def compare_dunn(i, j):
diff = np.abs(x_ranks_avg[i] - x_ranks_avg[j])
A = x_len_overall * (x_len_overall + 1.) / 12.
B = (1. / x_lens[i] + 1. / x_lens[j])
z_value = diff / np.sqrt((A - x_ties) * B)
p_value = 2. * ss.norm.sf(np.abs(z_value))
return p_value
def get_ties(x):
x_sorted = np.array(np.sort(x))
tie_sum = 0
pos = 0
while pos < x_len_overall:
n_ties = len(x_sorted[x_sorted == x_sorted[pos]])
pos = pos + n_ties
if n_ties > 1:
tie_sum += n_ties ** 3. - n_ties
c = tie_sum / (12. * (x_len_overall - 1))
return c
if isinstance(a, DataFrame):
x = a.copy()
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
x_groups_unique = x[group_col].unique()
x_len = x_groups_unique.size
x_lens = x.groupby(by=group_col)[val_col].count().values
x_flat = x[val_col].values
else:
x = np.array(a)
x = np.array([np.asarray(a)[~np.isnan(a)] for a in x])
x_flat = np.concatenate(x)
x_len = len(x)
x_lens = np.asarray([len(a) for a in x])
x_len_overall = len(x_flat)
if any(x_lens == 0):
raise ValueError("All groups must contain data")
x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
x_ranks = ss.rankdata(x_flat)
x_ranks_grouped = np.array([x_ranks[j:j + x_lens[i]] for i, j in enumerate(x_lens_cumsum)])
x_ranks_avg = [np.mean(z) for z in x_ranks_grouped]
x_ties = get_ties(x_ranks)
    vs = np.zeros((x_len, x_len), dtype=float)
combs = it.combinations(range(x_len), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i,j in combs:
vs[i, j] = compare_dunn(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
if isinstance(x, DataFrame):
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
else:
return vs
def posthoc_nemenyi(a, val_col = None, group_col = None, dist = 'chi', sort = True):
'''
Post-hoc pairwise test for multiple comparisons of mean rank sums
(Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains values.
group_col : str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
dist : str, optional
Method for determining the p value. The default distribution is "chi"
(chi-squared), else "tukey" (studentized range).
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
Numpy ndarray or pandas DataFrame of p values depending on input
data type.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] <NAME> (1997), Angewandte Statistik. Berlin: Springer.
Pages: 395-397, 662-664.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_nemenyi(x)
array([[-1. , 0.02206238, 0.06770864],
[ 0.02206238, -1. , 0.75361555],
[ 0.06770864, 0.75361555, -1. ]])
'''
def compare_stats_chi(i, j):
diff = np.abs(x_ranks_avg[i] - x_ranks_avg[j])
A = x_len_overall * (x_len_overall + 1.) / 12.
B = (1. / x_lens[i] + 1. / x_lens[j])
chi = diff ** 2. / (A * B)
return chi
def compare_stats_tukey(i, j):
diff = np.abs(x_ranks_avg[i] - x_ranks_avg[j])
B = (1. / x_lens[i] + 1. / x_lens[j])
q = diff / np.sqrt((x_len_overall * (x_len_overall + 1.) / 12.) * B)
return q
def get_ties(x):
x_sorted = np.array(np.sort(x))
tie_sum = 0
pos = 0
while pos < x_len_overall:
n_ties = len(x_sorted[x_sorted == x_sorted[pos]])
pos = pos + n_ties
if n_ties > 1:
tie_sum += n_ties ** 3. - n_ties
c = np.min([1., 1. - tie_sum / (x_len_overall ** 3. - x_len_overall)])
return c
def get_ties_conover(x):
x_sorted = np.array(np.sort(x))
tie_sum = 0
pos = 0
while pos < x_len_overall:
n_ties = len(x_sorted[x_sorted == x_sorted[pos]])
pos = pos + n_ties
if n_ties > 1:
tie_sum += n_ties ** 3. - n_ties
c = np.min([1., 1. - tie_sum / (x_len_overall ** 3. - x_len_overall)])
return c
if isinstance(a, DataFrame):
x = a.copy()
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
x_groups_unique = x[group_col].unique()
x_len = x_groups_unique.size
x_lens = x.groupby(by=group_col)[val_col].count().values
x_flat = x[val_col].values
else:
x = np.array(a)
x = np.array([np.asarray(a)[~np.isnan(a)] for a in x])
x_flat = np.concatenate(x)
x_len = len(x)
x_lens = np.asarray([len(a) for a in x])
x_len_overall = len(x_flat)
if any(x_lens == 0):
raise ValueError("All groups must contain data")
x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
x_ranks = ss.rankdata(x_flat)
x_ranks_grouped = np.array([x_ranks[j:j + x_lens[i]] for i, j in enumerate(x_lens_cumsum)])
x_ranks_avg = [np.mean(z) for z in x_ranks_grouped]
x_ties = get_ties(x_ranks)
    vs = np.zeros((x_len, x_len), dtype=float)
combs = it.combinations(range(x_len), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
if dist == 'chi':
for i,j in combs:
vs[i, j] = compare_stats_chi(i, j) / x_ties
vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)
elif dist == 'tukey':
for i,j in combs:
vs[i, j] = compare_stats_tukey(i, j) * np.sqrt(2.)
vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
if isinstance(x, DataFrame):
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
else:
return vs
def posthoc_nemenyi_friedman(a, y_col = None, block_col = None, group_col = None, melted = False, sort = False):
'''
Calculate pairwise comparisons using Nemenyi post-hoc test for unreplicated
blocked data. This test is usually conducted post-hoc after
significant results of the Friedman's test. The statistics refer to upper
quantiles of the studentized range distribution (Tukey) [1]_, [2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains block names.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
A one-way ANOVA with repeated measures that is also referred to as ANOVA
with unreplicated block design can also be conducted via Friedman's
test. The consequent post-hoc pairwise multiple comparison test
according to Nemenyi is conducted with this function.
This function does not test for ties.
References
----------
.. [1] <NAME> (2006), Statistical comparisons of classifiers over
multiple data sets, Journal of Machine Learning Research, 7, 1-30.
.. [2] <NAME> (1963) Distribution-free Multiple Comparisons. Ph.D.
thesis, Princeton University.
.. [3] <NAME> (1997), Angewandte Statistik. Berlin: Springer.
Pages: 668-675.
Examples
--------
>>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
>>> # and columns are groups.
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_nemenyi_friedman(x)
'''
if melted and not all([block_col, group_col, y_col]):
raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
def compare_stats(i, j):
dif = np.abs(R[groups[i]] - R[groups[j]])
qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
return qval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
#if not sort:
# x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
# x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
x.sort_values(by=[group_col, block_col], ascending=True, inplace=True)
x.dropna(inplace=True)
groups = x[group_col].unique()
k = groups.size
n = x[block_col].unique().size
x['mat'] = x.groupby(block_col)[y_col].rank()
R = x.groupby(group_col)['mat'].mean()
    vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
vs *= np.sqrt(2.)
vs[tri_upper] = psturng(vs[tri_upper], k, np.inf)
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
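# Hedged usage sketch (not part of the original module): the docstring example above
# rewritten as melted input, where the y, block, and group columns are named explicitly.
def _example_posthoc_nemenyi_friedman_melted():
    """Returns the pairwise Nemenyi p-value matrix for the melted block-design data."""
    df = DataFrame({'y': [31, 27, 24, 31, 28, 31, 45, 29, 46,
                          21, 18, 48, 42, 36, 46, 32, 17, 40],
                    'blocks': [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5],
                    'groups': ['a', 'b', 'c'] * 6})
    return posthoc_nemenyi_friedman(df, y_col='y', block_col='blocks',
                                    group_col='groups', melted=True)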
def posthoc_conover_friedman(a, y_col = None, block_col = None, group_col = None, melted = False, sort = False, p_adjust = None):
'''
Calculate pairwise comparisons using Conover post-hoc test for unreplicated
blocked data. This test is usually conducted post-hoc after
significant results of the Friedman test. The statistics refer to
the Student t distribution [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains block names.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
A one-way ANOVA with repeated measures that is also referred to as ANOVA
with unreplicated block design can also be conducted via the
friedman.test. The consequent post-hoc pairwise multiple comparison test
according to Conover is conducted with this function.
    If y is a matrix, then the columns refer to the treatment and the rows
indicate the block.
References
----------
.. [1] <NAME> and <NAME> (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] <NAME> (1999), Practical nonparametric Statistics, 3rd. Edition,
Wiley.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_conover_friedman(x)
'''
if melted and not all([block_col, group_col, y_col]):
raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
def compare_stats(i, j):
dif = np.abs(R[groups[i]] - R[groups[j]])
tval = dif / np.sqrt(A / B)
pval = 2. * ss.t.sf(np.abs(tval), df = (n-1)*(k-1))
return pval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
#if not sort:
# x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
# x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
x.dropna(inplace=True)
groups = x[group_col].unique()
k = groups.size
n = x[block_col].unique().size
x['mat'] = x.groupby(block_col)[y_col].rank()
R = x.groupby(group_col)['mat'].sum()
A1 = (x['mat'] ** 2).sum()
C1 = (n * k * (k + 1) ** 2) / 4
TT = np.sum([((R[g] - ((n * (k + 1))/2)) ** 2) for g in groups])
T1 = ((k - 1) * TT) / (A1 - C1)
A = 2 * k * (1 - T1 / (k * (n-1))) * ( A1 - C1)
B = (n - 1) * (k - 1)
    vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col = None, group_col = None, sort = False, p_adjust = None):
'''
Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
comparison procedure (NPM test) for simply ordered mean ranksums.
NPM test is basically an extension of Nemenyi's procedure for testing
increasingly ordered alternatives [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
The p-values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, then the NPM test
    simplifies to the ordinary Nemenyi test.
References
----------
.. [1] <NAME>., <NAME>., (2005), Multiple comparison procedures for
detecting differences in simply ordered means. Comput. Statist. Data
Anal. 48, 291--306.
Examples
--------
>>> x = np.array([[102,109,114,120,124],
[110,112,123,130,145],
[132,141,156,160,172]])
>>> sp.posthoc_npm_test(x)
'''
    x = __convert_to_df(a, val_col, group_col)
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col], ascending=True, inplace=True)
x_groups_unique = x[group_col].unique()
x['ranks'] = x.rank()
Ri = x.groupby(group_col)[val_col].mean()
ni = x.groupby(group_col)[val_col].count()
k = x[group_col].unique().size
n = x.shape[0]
sigma = np.sqrt(n * (n + 1) / 12.)
df = np.inf
def compare(m, u):
return (Ri[u] - Ri[m]) / (sigma / np.sqrt(2) * np.sqrt(1. / ni[m] + 1. / ni[u]))
stat = np.empty((k-1, k-1))
for i, j in it.combinations(range(k), 2):
u = j
m = np.arange(i, u-1)
tmp = compare(m, u)
stat[j-1, i] = np.max(tmp)
p_values = psturng(stat, k, np.inf)
tri_upper = np.triu_indices(p_values.shape[0], 1)
tri_lower = np.tril_indices(p_values.shape[0], -1)
p_values[tri_lower] = p_values.T[tri_lower]
if p_adjust:
p_values[tri_upper] = multipletests(p_values[tri_upper], method = p_adjust)[1]
np.fill_diagonal(p_values, -1)
return DataFrame(p_values, index=x_groups_unique, columns=x_groups_unique)
def posthoc_siegel_friedman(a, y_col = None, block_col = None, group_col = None, melted = False, sort = False, p_adjust = None):
'''
Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
Data. See authors' paper for additional information [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains block names.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
For all-pairs comparisons in a two factorial unreplicated complete block design
with non-normally distributed residuals, Siegel and Castellan's test can be
performed on Friedman-type ranked data.
References
----------
.. [1] <NAME>, <NAME>. (1988), Nonparametric Statistics for the
Behavioral Sciences. 2nd ed. New York: McGraw-Hill.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_siegel_friedman(x)
'''
if melted and not all([block_col, group_col, y_col]):
raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
def compare_stats(i, j):
dif = np.abs(R[groups[i]] - R[groups[j]])
zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
return zval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
#if not sort:
# x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
# x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
x.dropna(inplace=True)
groups = x[group_col].unique()
k = groups.size
n = x[block_col].unique().size
x['mat'] = x.groupby(block_col)[y_col].rank()
R = x.groupby(group_col)['mat'].mean()
    vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
vs = 2. * ss.norm.sf(np.abs(vs))
vs[vs > 1] = 1.
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col = None, block_col = None, group_col = None, melted = False, sort = False):
'''
Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
The p-values are computed from the chi-square distribution [1]_, [2]_,
[3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains block names.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
For all-pairs comparisons in a two factorial unreplicated complete block
design with non-normally distributed residuals, Miller's test can be
performed on Friedman-type ranked data.
References
----------
.. [1] <NAME>, <NAME>, <NAME> (1990), Verteilungsfreie
    Methoden in der Biostatistik. Berlin: Springer.
.. [2] <NAME>r. (1996), Simultaneous statistical inference. New
York: McGraw-Hill.
.. [3] <NAME> (2006), Data Analysis. A Statistical Primer for Psychology
Students. New Brunswick: Aldine Transaction.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_miller_friedman(x)
'''
if melted and not all([block_col, group_col, y_col]):
raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
def compare_stats(i, j):
dif = np.abs(R[groups[i]] - R[groups[j]])
qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
return qval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
#if not sort:
# x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
# x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
x.sort_values(by=[group_col,block_col], ascending=True, inplace=True)
x.dropna(inplace=True)
groups = x[group_col].unique()
k = groups.size
n = x[block_col].unique().size
x['mat'] = x.groupby(block_col)[y_col].rank()
R = x.groupby(group_col)['mat'].mean()
    vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
vs = vs ** 2
vs = ss.chi2.sf(vs, k - 1)
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col = None, block_col = None, group_col = None, melted = False, sort = False, p_adjust = None):
'''
Pairwise post-hoc test for multiple comparisons of rank sums according to
Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
references for additional information [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of block design,
i.e. rows are blocks, and columns are groups. In this case you do
not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (string).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains block names.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
References
----------
.. [1] <NAME> and <NAME> (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] <NAME> (1999), Practical nonparametric Statistics,
3rd. edition, Wiley.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_durbin(x)
'''
if melted and not all([block_col, group_col, y_col]):
raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
def compare_stats(i, j):
dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
tval = dif / denom
pval = 2. * ss.t.sf(np.abs(tval), df = df)
return pval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
x.dropna(inplace=True)
groups = x[group_col].unique()
t = len(groups)
b = x[block_col].unique().size
r = b
k = t
x['y_ranked'] = x.groupby(block_col)[y_col].rank()
Rj = x.groupby(group_col)['y_ranked'].sum()
A = (x['y_ranked'] ** 2).sum()
C = (b * k * (k + 1) ** 2) / 4.
D = (Rj ** 2).sum() - r * C
T1 = (t - 1) / (A - C) * D
denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k -1))))
df = b * k - b - t + 1
    vs = np.zeros((t, t), dtype=float)
combs = it.combinations(range(t), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col = None, group_col = None, midrank = True, sort = False, p_adjust = None):
'''
Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
that k-samples are drawn from the same population without having to specify
the distribution function of that population [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
group_col : str
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
midrank : bool, optional
Type of Anderson-Darling test which is computed. If set to True (default), the
midrank test applicable to continuous and discrete populations is performed. If
False, the right side empirical distribution is used.
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
References
----------
.. [1] <NAME>, <NAME> (1987), K-Sample Anderson-Darling Tests,
Journal of the American Statistical Association, Vol. 82, pp. 918-924.
Examples
--------
>>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
>>> sp.posthoc_anderson(x)
'''
x = __convert_to_df(a, val_col, group_col)
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col], ascending=True, inplace=True)
groups = x[group_col].unique()
k = groups.size
    vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i, j in combs:
vs[i, j] = ss.anderson_ksamp([x.loc[x[group_col] == groups[i], val_col], x.loc[x[group_col] == groups[j], val_col]])[2]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col = None, block_col = None, group_col = None, dist = 't', melted = False, sort = False, p_adjust = None):
'''
Calculate pairwise comparisons using Quade's post-hoc test for
unreplicated blocked data. This test is usually conducted if significant
results were obtained by the omnibus test [1]_, [2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (string).
y_col : str or int, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains block names.
group_col : str or int, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
dist : str, optional
Method for determining p values.
The default distribution is "t", else "normal".
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
References
----------
.. [1] <NAME> (1999), Practical nonparametric Statistics, 3rd. Edition,
Wiley.
.. [2] <NAME> and <NAME> (2003). NIST Handbook 148: Dataplot
Reference Manual, Volume 2: Let Subcommands and Library Functions.
National Institute of Standards and Technology Handbook Series, June 2003.
.. [3] <NAME> (1979), Using weighted rankings in the analysis of complete
blocks with additive block effects. Journal of the American Statistical
Association, 74, 680-683.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_quade(x)
'''
if melted and not all([block_col, group_col, y_col]):
raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
def compare_stats_t(i, j):
dif = np.abs(S[groups[i]] - S[groups[j]])
tval = dif / denom
pval = 2. * ss.t.sf(np.abs(tval), df = (b - 1) * (k - 1))
return pval
def compare_stats_norm(i, j):
dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
zval = dif / denom
pval = 2. * ss.norm.sf(np.abs(zval))
return pval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
x.dropna(inplace=True)
groups = x[group_col].unique()
k = len(groups)
b = x[block_col].unique().size
x['r'] = x.groupby(block_col)[y_col].rank()
q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
x['rr'] = x['r'] - (k + 1)/2
x['s'] = x.apply(lambda x, y: x['rr'] * y[x['blocks']], axis=1, args=(q,))
x['w'] = x.apply(lambda x, y: x['r'] * y[x['blocks']], axis=1, args=(q,))
A = (x['s'] ** 2).sum()
S = x.groupby(group_col)['s'].sum()
B = np.sum(S ** 2) / b
W = x.groupby(group_col)['w'].sum()
    vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
if dist == 't':
denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
for i, j in combs:
vs[i, j] = compare_stats_t(i, j)
else:
n = b * k
denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k-1)) / (18 * n * (n + 1)))
ff = 1. / (b * (b + 1)/2)
for i, j in combs:
vs[i, j] = compare_stats_norm(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_mackwolfe(a, val_col, group_col, p = None, n_perm = 100, sort = False, p_adjust = None):
'''
Mack-Wolfe Test for Umbrella Alternatives.
In dose-finding studies one may assume an increasing treatment effect with
increasing dose level. However, the test subject may actually succumb to
    toxic effects at high doses, which leads to decreasing treatment
effects [1]_, [2]_.
The scope of the Mack-Wolfe Test is to test for umbrella alternatives for
either a known or unknown point P (i.e. dose-level), where the peak
(umbrella point) is present.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str or int
Name (string) or index (int) of a column in a pandas DataFrame or an
array that contains quantitative data.
group_col : str or int
Name (string) or index (int) of a column in a pandas DataFrame or an
array that contains group names.
p : int, optional
The a-priori known peak as an ordinal number of the treatment group
including the zero dose level, i.e. p = {1, ..., k}. Defaults to None.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
Pandas DataFrame containing p values.
References
----------
.. [1] <NAME>. (1991) Notes on the Mack-Wolfe and Chen-Wolfe Tests for
Umbrella Alternatives. Biom. J., 33, 281-290.
.. [2] <NAME>., <NAME>. (1981) K-sample rank tests for umbrella
alternatives. J. Amer. Statist. Assoc., 76, 175-181.
Examples
--------
>>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
>>> sp.posthoc_mackwolfe(x, val_col = 0, group_col = 1)
'''
x = __convert_to_df(a, val_col, group_col)
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col], ascending=True, inplace=True)
k = x[group_col].unique().size
if p:
if p > k:
print("Selected 'p' > number of groups:", str(p), " > ", str(k))
return False
elif p < 1:
print("Selected 'p' < 1: ", str(p))
return False
Rij = x[val_col].rank()
n = x.groupby(group_col)[val_col].count()
def _fn(Ri, Rj):
return np.sum(Ri.apply(lambda x: Rj[Rj > x].size))
def _ustat(Rij, g, k):
levels = np.unique(g)
U = np.identity(k)
for i in range(k):
for j in range(i):
U[i,j] = _fn(Rij[x[group_col] == levels[i]], Rij[x[group_col] == levels[j]])
U[j,i] = _fn(Rij[x[group_col] == levels[j]], Rij[x[group_col] == levels[i]])
return U
def _ap(p, U):
tmp1 = 0
if p > 0:
for i in range(p):
for j in range(i+1, p+1):
tmp1 += U[i,j]
tmp2 = 0
if p < k:
for i in range(p, k):
for j in range(i+1, k):
tmp2 += U[j,i]
return tmp1 + tmp2
def _n1(p, n):
return np.sum(n[:p+1])
def _n2(p, n):
return np.sum(n[p:k])
def _mean_at(p, n):
N1 = _n1(p, n)
N2 = _n2(p, n)
return (N1**2 + N2**2 - np.sum(n**2) - n.iloc[p]**2)/4
def _var_at(p, n):
N1 = _n1(p, n)
N2 = _n2(p, n)
N = np.sum(n)
var = (2 * (N1**3 + N2**3) + 3 * (N1**2 + N2**2) -\
np.sum(n**2 * (2*n + 3)) - n.iloc[p]**2 * (2 * n.iloc[p] + 3) +\
12. * n.iloc[p] * N1 * N2 - 12. * n.iloc[p] ** 2 * N) / 72.
return var
if p:
if (x.groupby(val_col).count() > 1).any().any():
print("Ties are present")
U = _ustat(Rij, x[group_col], k)
est = _ap(p, U)
mean = _mean_at(p, n)
sd = np.sqrt(_var_at(p, n))
stat = (est - mean)/sd
p_value = ss.norm.sf(stat)
else:
U = _ustat(Rij, x[group_col], k)
Ap = np.array([_ap(i, U) for i in range(k)]).ravel()
mean = np.array([_mean_at(i, n) for i in range(k)]).ravel()
var = np.array([_var_at(i, n) for i in range(k)]).ravel()
A = (Ap - mean) / np.sqrt(var)
stat = np.max(A)
p = A == stat
est = None
mt = []
for i in range(n_perm):
ix = Series(np.random.permutation(Rij))
Uix = _ustat(ix, x[group_col], k)
Apix = np.array([_ap(i, Uix) for i in range(k)])
Astarix = (Apix - mean) / np.sqrt(var)
mt.append(np.max(Astarix))
mt = np.array(mt)
p_value = mt[mt > stat].size / n_perm
return p_value, stat
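# Hedged usage sketch for posthoc_mackwolfe (the DataFrame and column names below are
# hypothetical; with p=None the function returns a permutation p value and the statistic):
# >>> df = pd.DataFrame({'value': [10, 59, 76, 10, 32, 44],
# ...                    'group': ['a', 'a', 'b', 'b', 'c', 'c']})
# >>> p_value, stat = posthoc_mackwolfe(df, val_col='value', group_col='group', n_perm=100)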
def posthoc_vanwaerden(a, val_col, group_col, sort = False, p_adjust = None):
'''
Van der Waerden's test for pairwise multiple comparisons between group
levels. See references for additional information [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str or int
Name (string) or index (int) of a column in a pandas DataFrame or an
array that contains quantitative data.
group_col : str or int
Name (string) or index (int) of a column in a pandas DataFrame or an
array that contains group names.
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
For one-factorial designs with samples that do not meet the assumptions
for one-way-ANOVA and subsequent post-hoc tests, the van der Waerden test
vanWaerden.test using normal scores can be employed. Provided that
significant differences were detected by this global test, one may be
interested in applying post-hoc tests according to van der Waerden
for pairwise multiple comparisons of the group levels.
There is no tie correction applied in this function.
References
----------
.. [1] Conover, W. J. and Iman, R. L. (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] van der Waerden, B. L. (1952) Order tests for the two-sample problem and
their power, Indagationes Mathematicae, 14, 453-458.
Examples
--------
>>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
>>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
'''
x = __convert_to_df(a, val_col, group_col)
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col], ascending=True, inplace=True)
groups = x[group_col].unique()
n = x[val_col].size
k = groups.size
r = ss.rankdata(x[val_col])
x['z_scores'] = ss.norm.ppf(r / (n + 1))
aj = x.groupby(group_col)['z_scores'].sum()
nj = x.groupby(group_col)['z_scores'].count()
s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
sts = (1. / s2) * np.sum(aj ** 2. / nj)
param = k - 1
A = aj / nj
vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
def compare_stats(i, j):
dif = np.abs(A[groups[i]] - A[groups[j]])
B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
tval = dif / np.sqrt(s2 * (n - 1. - sts)/(n - k) * B)
pval = 2. * ss.t.sf(np.abs(tval), df = n - k)
return pval
for i, j in combs:
vs[i, j] = compare_stats(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
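# Hedged usage sketch for posthoc_vanwaerden with Holm-adjusted p values (data and
# column names are hypothetical):
# >>> df = pd.DataFrame({'value': [10, 59, 76, 10, 32, 44],
# ...                    'group': ['a', 'a', 'b', 'b', 'c', 'c']})
# >>> posthoc_vanwaerden(df, val_col='value', group_col='group', p_adjust='holm')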
def posthoc_ttest(a, val_col = None, group_col = None, pool_sd = False, equal_var = True, p_adjust = None, sort = True):
'''
Pairwise T test for multiple comparisons of independent groups. May be
used after an ordinary ANOVA to do pairwise comparisons.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains values.
group_col : str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
equal_var : bool, optional
If True (default), perform a standard independent test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
pool_sd : bool, optional
Calculate a common SD for all groups and use that for all
comparisons (this can be useful if some groups are small).
This method does not actually call scipy ttest_ind() function,
so extra arguments are ignored. Default is False.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
Numpy ndarray or pandas DataFrame of p values depending on input
data type.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_ttest(x, p_adjust = 'holm')
array([[-1. , 0.04600899, 0.31269089],
[ 0.04600899, -1. , 0.6327077 ],
[ 0.31269089, 0.6327077 , -1. ]])
'''
if isinstance(a, DataFrame):
x = a.copy()
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
x_lens = x.groupby(by=group_col)[val_col].count().values
x_lens_cumsum = np.insert(np.cumsum(x_lens), 0, 0)[:-1]
x_grouped = np.array([x[val_col][j:(j + x_lens[i])] for i, j in enumerate(x_lens_cumsum)])
#x_grouped = [x.loc[v, val_col].values.tolist() for g, v in x.groupby(group_col, sort=False).groups.items()]
else:
x = np.array(a)
"""Bearing Element module.
This module defines the BearingElement classes which will be used to represent the rotor
bearings and seals. There are 7 different classes to represent bearing options,
and 2 element options with 8 or 12 degrees of freedom.
"""
import os
import warnings
from inspect import signature
import numpy as np
import toml
from plotly import graph_objects as go
from scipy import interpolate as interpolate
from ross.element import Element
from ross.fluid_flow import fluid_flow as flow
from ross.fluid_flow.fluid_flow_coefficients import (
calculate_stiffness_and_damping_coefficients,
)
from ross.units import Q_, check_units
from ross.utils import read_table_file
__all__ = [
"BearingElement",
"SealElement",
"BallBearingElement",
"RollerBearingElement",
"BearingFluidFlow",
"BearingElement6DoF",
"MagneticBearingElement",
]
class BearingElement(Element):
"""A bearing element.
This class will create a bearing element.
Parameters can be a constant value or speed dependent.
For speed dependent parameters, each argument should be passed
as an array and the correspondent speed values should also be
passed as an array.
Values for each parameter will be interpolated for the speed.
Parameters
----------
n : int
Node which the bearing will be located in
kxx : float, array, pint.Quantity
Direct stiffness in the x direction (N/m).
cxx : float, array, pint.Quantity
Direct damping in the x direction (N*s/m).
kyy : float, array, pint.Quantity, optional
Direct stiffness in the y direction (N/m).
(defaults to kxx)
cyy : float, array, pint.Quantity, optional
Direct damping in the y direction (N*s/m).
(defaults to cxx)
kxy : float, array, pint.Quantity, optional
Cross coupled stiffness in the x direction (N/m).
(defaults to 0)
cxy : float, array, pint.Quantity, optional
Cross coupled damping in the x direction (N*s/m).
(defaults to 0)
kyx : float, array, pint.Quantity, optional
Cross coupled stiffness in the y direction (N/m).
(defaults to 0)
cyx : float, array, pint.Quantity, optional
Cross coupled damping in the y direction (N*s/m).
(defaults to 0)
frequency : array, pint.Quantity, optional
Array with the frequencies (rad/s).
tag : str, optional
A tag to name the element
Default is None.
n_link : int, optional
Node to which the bearing will connect. If None the bearing is
connected to ground.
Default is None.
scale_factor : float, optional
The scale factor is used to scale the bearing drawing.
Default is 1.
color : str, optional
A color to be used when the element is represented.
Default is '#355d7a' (Cardinal).
Examples
--------
>>> # A bearing element located in the first rotor node, with these
>>> # following stiffness and damping coefficients and speed range from
>>> # 0 to 200 rad/s
>>> import ross as rs
>>> kxx = 1e6
>>> kyy = 0.8e6
>>> cxx = 2e2
>>> cyy = 1.5e2
>>> frequency = np.linspace(0, 200, 11)
>>> bearing0 = rs.BearingElement(n=0, kxx=kxx, kyy=kyy, cxx=cxx, cyy=cyy, frequency=frequency)
>>> bearing0.K(frequency) # doctest: +ELLIPSIS
array([[[1000000., 1000000., ...
>>> bearing0.C(frequency) # doctest: +ELLIPSIS
array([[[200., 200., ...
"""
@check_units
def __init__(
self,
n,
kxx,
cxx,
kyy=None,
kxy=0,
kyx=0,
cyy=None,
cxy=0,
cyx=0,
frequency=None,
tag=None,
n_link=None,
scale_factor=1,
color="#355d7a",
**kwargs,
):
if frequency is not None:
self.frequency = np.array(frequency, dtype=np.float64)
else:
self.frequency = frequency
args = ["kxx", "kyy", "kxy", "kyx", "cxx", "cyy", "cxy", "cyx"]
# all args to coefficients
args_dict = locals()
if kyy is None:
args_dict["kyy"] = kxx
if cyy is None:
args_dict["cyy"] = cxx
# check coefficients len for consistency
coefficients_len = []
for arg in args:
coefficient, interpolated = self._process_coefficient(args_dict[arg])
setattr(self, arg, coefficient)
setattr(self, f"{arg}_interpolated", interpolated)
coefficients_len.append(len(coefficient))
if frequency is not None and type(frequency) != float:
coefficients_len.append(len(args_dict["frequency"]))
if len(set(coefficients_len)) > 1:
raise ValueError(
"Arguments (coefficients and frequency)"
" must have the same dimension"
)
else:
for c in coefficients_len:
if c != 1:
raise ValueError(
"Arguments (coefficients and frequency)"
" must have the same dimension"
)
self.n = n
self.n_link = n_link
self.n_l = n
self.n_r = n
self.tag = tag
self.color = color
self.scale_factor = scale_factor
self.dof_global_index = None
def _process_coefficient(self, coefficient):
"""Helper function used to process the coefficient data."""
interpolated = None
if isinstance(coefficient, (int, float)):
if self.frequency is not None and type(self.frequency) != float:
coefficient = [coefficient for _ in range(len(self.frequency))]
else:
coefficient = [coefficient]
if len(coefficient) > 1:
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
interpolated = interpolate.UnivariateSpline(
self.frequency, coefficient
)
# dfitpack.error is not exposed by scipy
# so a bare except is used
except:
try:
if len(self.frequency) in (2, 3):
interpolated = interpolate.interp1d(
self.frequency,
coefficient,
kind=len(self.frequency) - 1,
fill_value="extrapolate",
)
except:
raise ValueError(
"Arguments (coefficients and frequency)"
" must have the same dimension"
)
else:
interpolated = interpolate.interp1d(
[0, 1],
[coefficient[0], coefficient[0]],
kind="linear",
fill_value="extrapolate",
)
return coefficient, interpolated
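# Hedged usage sketch for _process_coefficient (values are hypothetical): a scalar
# coefficient passed together with a frequency array is expanded to one value per
# frequency and wrapped in an interpolator, so it can be evaluated at any speed:
# >>> brg = BearingElement(n=0, kxx=1e6, cxx=2e2, frequency=np.linspace(0, 200, 5))
# >>> brg.kxx_interpolated(150.0)   # constant ~1e6 across the frequency range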
def plot(
self,
coefficients=None,
frequency_units="rad/s",
stiffness_units="N/m",
damping_units="N*s/m",
fig=None,
**kwargs,
):
"""Plot coefficient vs frequency.
Parameters
----------
coefficients : list, str
List or str with the coefficients to plot.
frequency_units : str
Frequency units.
Default is rad/s.
stiffness_units : str
Stiffness units.
Default is N/m.
damping_units : str
Damping units.
Default is N*s/m.
**kwargs : optional
Additional key word arguments can be passed to change the plot layout only
(e.g. width=1000, height=800, ...).
*See Plotly Python Figure Reference for more information.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
Example
-------
>>> bearing = bearing_example()
>>> fig = bearing.plot('kxx')
>>> # fig.show()
"""
if fig is None:
fig = go.Figure()
if isinstance(coefficients, str):
coefficients = [coefficients]
# check coefficients consistency
coefficients_set = set([coeff[0] for coeff in coefficients])
if len(coefficients_set) > 1:
raise ValueError("Can only plot stiffness or damping in the same plot.")
coeff_to_plot = coefficients_set.pop()
if coeff_to_plot == "k":
default_units = "N/m"
y_units = stiffness_units
else:
default_units = "N*s/m"
y_units = damping_units
frequency_range = np.linspace(min(self.frequency), max(self.frequency), 30)
for coeff in coefficients:
y_value = (
Q_(
getattr(self, f"{coeff}_interpolated")(frequency_range),
default_units,
)
.to(y_units)
.m
)
frequency_range = Q_(frequency_range, "rad/s").to(frequency_units).m
fig.add_trace(
go.Scatter(
x=frequency_range,
y=y_value,
mode="lines",
showlegend=False,
hovertemplate=f"Frequency ({frequency_units}): %{{x:.2f}}<br> Coefficient ({y_units}): %{{y:.3e}}",
)
)
fig.update_xaxes(title_text=f"Frequency ({frequency_units})")
fig.update_yaxes(exponentformat="power")
fig.update_layout(**kwargs)
return fig
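# Hedged usage sketch for plot (all requested coefficients must be of the same kind,
# i.e. all stiffness or all damping, since they share one y axis):
# >>> fig = bearing_example().plot(['kxx', 'kyy'])
# >>> # fig.show()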
def __repr__(self):
"""Return a string representation of a bearing element.
Returns
-------
A string representation of a bearing element object.
Examples
--------
>>> bearing = bearing_example()
>>> bearing # doctest: +ELLIPSIS
BearingElement(n=0, n_link=None,
kxx=[...
"""
return (
f"{self.__class__.__name__}"
f"(n={self.n}, n_link={self.n_link},\n"
f" kxx={self.kxx}, kxy={self.kxy},\n"
f" kyx={self.kyx}, kyy={self.kyy},\n"
f" cxx={self.cxx}, cxy={self.cxy},\n"
f" cyx={self.cyx}, cyy={self.cyy},\n"
f" frequency={self.frequency}, tag={self.tag!r})"
)
def __eq__(self, other):
"""Equality method for comparasions.
Parameters
----------
other: object
The second object to be compared with.
Returns
-------
bool
True if the comparison is true; False otherwise.
Examples
--------
>>> bearing1 = bearing_example()
>>> bearing2 = bearing_example()
>>> bearing1 == bearing2
True
"""
compared_attributes = [
"kxx",
"kyy",
"kxy",
"kyx",
"cxx",
"cyy",
"cxy",
"cyx",
"frequency",
]
if isinstance(other, self.__class__):
init_args = []
for arg in signature(self.__init__).parameters:
if arg not in ["kwargs"]:
init_args.append(arg)
init_args_comparison = []
for arg in init_args:
comparison = getattr(self, arg) == getattr(other, arg)
try:
comparison = all(comparison)
except TypeError:
pass
init_args_comparison.append(comparison)
init_args_comparison = all(init_args_comparison)
attributes_comparison = all(
(
(
np.array(getattr(self, attr)) == np.array(getattr(other, attr))
).all()
for attr in compared_attributes
)
)
return init_args_comparison and attributes_comparison
return False
def __hash__(self):
return hash(self.tag)
def save(self, file):
try:
data = toml.load(file)
except FileNotFoundError:
data = {}
# save initialization args and coefficients
args = list(signature(self.__init__).parameters)
args += [
"kxx",
"kyy",
"kxy",
"kyx",
"cxx",
"cyy",
"cxy",
"cyx",
]
brg_data = {}
for arg in args:
if arg not in ["kwargs"]:
brg_data[arg] = self.__dict__[arg]
# change np.array to lists so that we can save in .toml as list(floats)
for k, v in brg_data.items():
if isinstance(v, np.generic):
brg_data[k] = brg_data[k].item()
elif isinstance(v, np.ndarray):
brg_data[k] = brg_data[k].tolist()
# case for a container with np.float (e.g. list(np.float))
else:
try:
brg_data[k] = [i.item() for i in brg_data[k]]
except (TypeError, AttributeError):
pass
data[f"{self.__class__.__name__}_{self.tag}"] = brg_data
with open(file, "w") as f:
toml.dump(data, f)
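# Hedged usage sketch for save (the file name is hypothetical): init args and
# coefficients are written to a TOML table keyed by class name and tag:
# >>> bearing = bearing_example()
# >>> bearing.save("bearings.toml")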
def dof_mapping(self):
"""Degrees of freedom mapping.
Returns a dictionary with a mapping between degree of freedom and its
index.
Returns
-------
dof_mapping : dict
A dictionary containing the degrees of freedom and their indexes.
Examples
--------
The numbering of the degrees of freedom for each node.
Being the following their ordering for a node:
x_0 - horizontal translation
y_0 - vertical translation
>>> bearing = bearing_example()
>>> bearing.dof_mapping()
{'x_0': 0, 'y_0': 1}
"""
return dict(x_0=0, y_0=1)
def M(self):
"""Mass matrix for an instance of a bearing element.
This method returns the mass matrix for an instance of a bearing
element.
Returns
-------
M : np.ndarray
Mass matrix (kg).
Examples
--------
>>> bearing = bearing_example()
>>> bearing.M()
array([[0., 0.],
[0., 0.]])
"""
M = np.zeros_like(self.K(0))
return M
def K(self, frequency):
"""Stiffness matrix for an instance of a bearing element.
This method returns the stiffness matrix for an instance of a bearing
element.
Parameters
----------
frequency : float
The excitation frequency (rad/s).
Returns
-------
K : np.ndarray
A 2x2 matrix of floats containing the kxx, kxy, kyx, and kyy values.
Examples
--------
>>> bearing = bearing_example()
>>> bearing.K(0)
array([[1000000., 0.],
[ 0., 800000.]])
"""
kxx = self.kxx_interpolated(frequency)
kyy = self.kyy_interpolated(frequency)
kxy = self.kxy_interpolated(frequency)
kyx = self.kyx_interpolated(frequency)
K = np.array([[kxx, kxy], [kyx, kyy]])
if self.n_link is not None:
# fmt: off
K = np.vstack((np.hstack([K, -K]),
np.hstack([-K, K])))
# fmt: on
return K