prompt | completion | api |
---|---|---|
stringlengths 19–879k | stringlengths 3–53.8k | stringlengths 8–59 |
#! /usr/bin/env python
import argparse, sys, os, errno
import logging
import numpy as np
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)s [%(levelname)s] : %(message)s')
logger = logging.getLogger(__name__)
def prepare_output_file(filename):
try:
os.makedirs(os.path.dirname(filename))
except OSError as e:
if e.errno != errno.EEXIST:
raise e
def read_hdf5(filename, datasets):
import h5py
f = h5py.File(filename, 'r')
if isinstance(datasets, list) or isinstance(datasets, tuple):
data = []
for dataset in datasets:
data.append(f[dataset][:])
else:
data = f[datasets][:]
f.close()
return data
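# Illustrative usage sketch (not part of the original script; the dataset names
# 'X' and 'y' below are placeholders, not names guaranteed to exist in any file):
#   X, y = read_hdf5('data.h5', ['X', 'y'])   # a list of names returns a list of arrays
#   X = read_hdf5('data.h5', 'X')             # a single name returns one array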
def array_lookup(mapping, keys):
"""
Get indices of matched strings in a numpy array
:param mapping: a 1D array of strings
:param keys: a 1D array of keys to lookup
:return: a 1D array of indices of keys
"""
d = {k: i for i, k in enumerate(mapping)}
return np.asarray([d[k] for k in keys])
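# Worked example (sketch, assuming numpy is imported as np): the dict built from
# `mapping` makes the lookup O(len(mapping) + len(keys)) instead of a nested scan.
#   mapping = np.array(['img_a', 'img_b', 'img_c'])
#   keys = np.array(['img_c', 'img_a'])
#   array_lookup(mapping, keys)  # -> array([2, 0])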
from keras.preprocessing.image import apply_transform, transform_matrix_offset_center, random_channel_shift
from keras.preprocessing.image import ImageDataGenerator
class ImagePairDataGenerator(ImageDataGenerator):
def random_transform_pair(self, x, y, seed=None):
"""Randomly augment a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)
else:
theta = 0
if self.height_shift_range:
tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.random.uniform(-self.shear_range, self.shear_range)
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
y = apply_transform(y, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
return x, y
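# Usage sketch (hypothetical arrays x and y, e.g. an image and its segmentation
# mask): both tensors receive the same randomly composed affine transform, so the
# spatial correspondence between input and target is preserved; only x receives
# the channel shift, since intensity jitter would corrupt a label mask.
#   gen = ImagePairDataGenerator(rotation_range=5, width_shift_range=0.1,
#                                height_shift_range=0.1, zoom_range=0.1)
#   x_aug, y_aug = gen.random_transform_pair(x, y, seed=42)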
class BatchImageDataGenerator(object):
def __init__(self, X, y, image_generator, batch_size=20, transform_y=False):
self.batch_size = batch_size
self.n_batches = int(np.ceil(float(y.shape[0])/batch_size))
self.steps_per_epoch = self.n_batches
self.transform_y = transform_y
self.X = X
self.y = y
self.image_generator = image_generator
self.n_samples = X.shape[0]
def __call__(self):
while True:
indices = np.random.permutation(self.n_samples)
for i in range(self.n_batches):
indices_batch = indices[(i*self.batch_size):min((i + 1)*self.batch_size, self.n_samples)]
X_batch = np.empty([len(indices_batch)] + list(self.X.shape[1:]), dtype=self.X.dtype)
if self.transform_y:
y_batch = np.empty([len(indices_batch)] + list(self.y.shape[1:]), dtype=self.y.dtype)
for j, k in enumerate(indices_batch):
X_batch[j], y_batch[j] = self.image_generator.random_transform_pair(self.X[k], self.y[k])
else:
for j, k in enumerate(indices_batch):
X_batch[j] = self.image_generator.random_transform(self.X[k])
y_batch = self.y[indices_batch]
yield X_batch, y_batch
class UpsampleImageDataGenerator(object):
def __init__(self, X, y, image_generator, batch_size=20):
self.batch_size = batch_size
n_positives = np.count_nonzero(y == 1)
n_negatives = np.count_nonzero(y == 0)
logger.info('number of positive/negative samples: %d/%d' % (n_positives, n_negatives))
major_class = 1 if (n_positives > n_negatives) else 0
self.indices_major = np.nonzero(y == major_class)[0]
self.indices_minor = np.nonzero(y != major_class)[0]
self.n_batches = int(np.round(len(self.indices_major) * 2 / batch_size))
self.steps_per_epoch = self.n_batches
self.X = X
self.y = y
self.image_generator = image_generator
def __call__(self):
while True:
indices_major_rand = np.random.permutation(self.indices_major)
indices_minor_rand = np.random.choice(self.indices_minor, replace=True, size=len(indices_major_rand))
for i in range(self.n_batches):
start = i * (self.batch_size // 2)
end = (i + 1) * (self.batch_size // 2)
indices_batch = np.concatenate([indices_major_rand[start:end],
indices_minor_rand[start:end]])
X_batch = np.empty([len(indices_batch)] + list(self.X.shape[1:]), dtype=self.X.dtype)
for j, k in enumerate(indices_batch):
X_batch[j] = self.image_generator.random_transform(self.X[k])
y_batch = self.y[indices_batch]
yield X_batch, y_batch
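# Usage sketch (hypothetical model and arrays): each batch is built from half
# majority-class samples (drawn without replacement within an epoch) and half
# minority-class samples (drawn with replacement), so classes stay roughly
# balanced in every batch.
#   upsampler = UpsampleImageDataGenerator(X_train, y_train, image_generator,
#                                          batch_size=32)
#   model.fit_generator(upsampler(), steps_per_epoch=upsampler.steps_per_epoch,
#                       epochs=10)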
def classify_types(args):
from models import get_pretrained_vgg16
import numpy as np
import h5py
import keras
globals().update(locals())
def data_generator(X, y, batch_size=25):
n_samples = X.shape[0]
while True:
indices = np.random.permutation(n_samples)
for i_batch in range(n_samples // batch_size):
indices_batch = indices[(i_batch * batch_size):((i_batch + 1) * batch_size)]
yield X[indices_batch], y[indices_batch]
logger.info('read input file: ' + args.input_file)
fin = h5py.File(args.input_file, 'r')
X = fin['X'][:]
positions = fin['position'][:]
fin.close()
y = (positions == 'C').astype('int32')
logger.info('labels: %s' % str(positions[:10]))
if args.indices_file is not None:
fin = h5py.File(args.indices_file, 'r')
indices = fin[args.indices_name][:]
fin.close()
X = np.take(X, indices, axis=0)
y = np.take(y, indices, axis=0)
logger.info('number of training samples: %d' % indices.shape[0])
X = np.repeat(X, 3, axis=3)
n_samples = X.shape[0]
generator = data_generator(X, y, args.batch_size)
logger.info('build model')
input_shape = X.shape[1:]
logger.info('input_shape = %s' % repr(X.shape))
# model = get_model(args.model_name, input_shape)
logger.info('load pretrained vgg16 model: data/VGG_imagenet.npy')
model = get_pretrained_vgg16('data/VGG_imagenet.npy', input_shape)
model.summary()
logger.info('train the model')
model.fit_generator(generator, steps_per_epoch=n_samples // args.batch_size,
epochs=args.epochs,
callbacks=[keras.callbacks.TensorBoard(log_dir=args.output_file + '.tensorboard')])
logger.info('save the model: ' + args.output_file)
prepare_output_file(args.output_file)
model.save(args.output_file)
def classify_diseases(args):
import keras
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
from sklearn.metrics import accuracy_score, roc_auc_score
from models import add_fc_layers
import h5py
globals().update(locals())
logger.info('read cv_split file: ' + args.cv_split_file)
logger.info('use cv fold: %d' % args.cv_fold)
fin = h5py.File(args.cv_split_file, 'r')
image_id_train = fin['/%d/train' % args.cv_fold][:]
image_id_test = fin['/%d/test' % args.cv_fold][:]
image_id_valid = fin['/%d/valid' % args.cv_fold][:]
fin.close()
logger.info('read input images file: ' + args.input_file)
fin = h5py.File(args.input_file, 'r')
X = fin['X'][:]
image_id_X = fin['image_id'][:]
fin.close()
logger.info('convert gray-scale images to 3-channel images')
X_train = np.repeat(X[array_lookup(image_id_X, image_id_train)], 3, axis=3)
X_test = np.repeat(X[array_lookup(image_id_X, image_id_test)], 3, axis=3)
X_valid = np.repeat(X[array_lookup(image_id_X, image_id_valid)], 3, axis=3)
del X
logger.info('read targets file: ' + args.target_file)
fin = h5py.File(args.target_file, 'r')
y = fin['y'][:]
image_id_y = fin['image_id'][:]
fin.close()
y_train = np.take(y, array_lookup(image_id_y, image_id_train), axis=0)
y_test = np.take(y, array_lookup(image_id_y, image_id_test), axis=0)
y_valid = np.take(y, array_lookup(image_id_y, image_id_valid), axis=0)
del y
if args.mask_file is not None:
logger.info('read mask data from file: ' + args.mask_file)
fin = h5py.File(args.mask_file, 'r')
mask = fin['X'][:]
image_id_mask = fin['image_id'][:]
fin.close()
# soften the mask: clip to [0.2, 1.0] so masked-out regions are dimmed rather than zeroed
mask = mask.astype('float32')
mask = np.clip(mask, 0.2, 1.0)
logger.info('apply mask to input images')
X_train *= mask[array_lookup(image_id_mask, image_id_train)]
X_test *= mask[array_lookup(image_id_mask, image_id_test)]
X_valid *= mask[array_lookup(image_id_mask, image_id_valid)]
del mask
# multi-class
if len(y_train.shape) > 1:
n_classes = y_train.shape[1]
class_freq = {c: count for c, count in enumerate(y_train.sum(axis=0))}
# two-class
else:
class_freq = {c: count for c, count in zip(*np.unique(y_train, return_counts=True))}
n_classes = 2
logger.info('number of classes: %d' % (n_classes))
logger.info('class frequencies in training data: ' + repr(class_freq))
image_generator = ImageDataGenerator(
featurewise_center=False,
featurewise_std_normalization=False,
rotation_range=3,
width_shift_range=0.1,
height_shift_range=0.1,
channel_shift_range=0.2,
zoom_range=0.1,
horizontal_flip=False)
logger.info('create batch data generator')
data_generator = BatchImageDataGenerator(X_train, y_train,
image_generator=image_generator,
batch_size=args.batch_size,
transform_y=False)
logger.info('read model file: ' + args.pretrained_model_file)
pretrained_model = load_model(args.pretrained_model_file)
if args.fine_tune:
logger.info('fix weights in pretrained model for fine-tuning')
for layer in pretrained_model.layers:
layer.trainable = False
logger.info('add FC layers for classification')
model = add_fc_layers(pretrained_model, n_classes)
model.summary()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
train_log_file = os.path.join(args.output_dir, 'train.log')
logger.info('train the model')
callbacks = [keras.callbacks.CSVLogger(train_log_file)]
class_weight = {c: 1.0 / float(class_freq[c]) for c in class_freq.keys()}
class_weight_norm = sum(class_weight.values())
class_weight = {c: class_weight[c] / class_weight_norm for c in class_weight.keys()}
logger.info('class weight: ' + repr(class_weight))
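# Worked example of the class-weight computation above (sketch with made-up
# counts): with class_freq = {0: 900, 1: 100}, the inverse frequencies are
# {0: 1/900, 1: 1/100}; after dividing by their sum (~0.01111) the weights
# become {0: 0.1, 1: 0.9}, so the rare class dominates the loss weighting.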
"""
datagen = data_generator()
for i in range(50):
X_batch, y_batch = datagen.next()
model.train_on_batch(X_batch, y_batch, class_weight=class_weight)
logger.info('evaluate on batch %d (size: %d): %s'%(i, X_batch.shape[0], repr(model.evaluate(X_batch, y_batch))))
"""
model.fit_generator(data_generator(),
steps_per_epoch=data_generator.steps_per_epoch,
epochs=args.epochs,
callbacks=callbacks,
class_weight=class_weight,
validation_data=(X_valid, y_valid))
logger.info('test the model')
y_pred = model.predict(X_test, batch_size=args.batch_size)
if n_classes > 2:
y_pred_labels = np.argmax(y_pred, axis=1)
import numpy as np
import pytest
from ..utils import ArrayDeque, ExperienceCache, softmax
from ..errors import ArrayDequeOverflowError
def test_softmax():
rnd = np.random.RandomState(7)
w = rnd.randn(3, 5)
x = softmax(w, axis=1)
y = softmax(w + 100., axis=1)
z = softmax(w * 100., axis=1)
# check shape
assert x.shape == w.shape
# check normalization
np.testing.assert_almost_equal(x.sum(axis=1), np.ones(3))
# check translation invariance
np.testing.assert_almost_equal(y.sum(axis=1), np.ones(3))
np.testing.assert_almost_equal(x, y)
# check robustness by clipping
assert not np.any(np.isnan(z))
np.testing.assert_almost_equal(z.sum(axis=1), np.ones(3))
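# A minimal, numerically stable softmax sketch consistent with the properties
# checked above (an assumption about what ..utils.softmax does, not its actual
# implementation): subtracting the row-wise max gives translation invariance and
# keeps np.exp from overflowing for large inputs such as w * 100.
#   def softmax(w, axis=-1):
#       z = w - np.max(w, axis=axis, keepdims=True)
#       e = np.exp(z)
#       return e / np.sum(e, axis=axis, keepdims=True)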
class TestArrayDeque:
def test_cycle(self):
d = ArrayDeque(shape=[], maxlen=3, overflow='cycle')
d.append(1)
d.append(2)
d.append(3)
np.testing.assert_array_equal(d.array, [1, 2, 3])
d.append(4)
np.testing.assert_array_equal(d.array, [2, 3, 4])
assert d.pop() == 4
np.testing.assert_array_equal(d.array, [2, 3])
d.append(5)
np.testing.assert_array_equal(d.array, [2, 3, 5])
d.append(6)
np.testing.assert_array_equal(d.array, [3, 5, 6])
assert d.popleft() == 3
np.testing.assert_array_equal(d.array, [5, 6])
import cv2
import numpy as np
from matplotlib import pyplot as plt
import scipy.misc
from robot import *
from data_collector import DataCollector
from sklearn.neighbors import BallTree
import time
import Tkinter as tk
import pickle
import matplotlib.pyplot as plt
import random
import string
import tfx  # used below for tfx.tb_angles; assumed available (it may also be pulled in by "from robot import *")
def logTimeStep(collector, arm, sn, filename, nextpos, nextrot, gripper, stop, focus):
f = open('demonstrations/'+filename, 'a')
frame = arm.get_current_cartesian_position()
pos = tuple(frame.position[:3])
rot = tfx.tb_angles(frame.rotation)
rot = (rot.yaw_deg, rot.pitch_deg, rot.roll_deg)
pickle.dump({'pos': pos,
'rot': rot,
'sn': sn,
'image': collector.left_image.dumps(),
'npos': nextpos,
'nextrot': nextrot,
'gripper': gripper,
'stop': stop,
'focus': focus}, f)
f.close()
#demo file name
filename = ''.join(random.choice(string.lowercase) for _ in range(9)) + '.p'
#initialize whereever it is
psm1 = robot("PSM1")
psm1.open_gripper(90)
time.sleep(2)
#do some image processing
d = DataCollector()
time.sleep(1)
img = cv2.medianBlur(d.left_image[:,850:], 7) #[:, 580:1500]
mask = cv2.inRange(img, np.array((100,100,100),dtype = "uint8"), np.array((255,255,255),dtype = "uint8"))
output = np.sign(cv2.bitwise_and(img, img, mask = mask))*255
output = cv2.erode(output,np.array([7,7]),iterations = 1)
estimates = np.argwhere(output[:,:,0] > 0)
tree = BallTree(estimates, leaf_size=2)
N,p = estimates.shape
while True:
i = np.random.choice(np.arange(0,N))
dist, ind = tree.query(estimates[i,:], k=25)
mean = np.mean(estimates[ind[0],:].T, axis=1)
cov = np.cov(estimates[ind[0],:].T)
U, V = np.linalg.eig(cov)
minor = np.argmin(U)
yangle = np.arctan(V[1,minor]/V[0,minor])*180/np.pi
if np.abs(yangle) < 20:
plt.imshow(output, cmap='gray')
ax = plt.axes()
ax.arrow(mean[1], mean[0], 100*V[1,minor], 100*V[0,minor], head_length=30, fc='r', ec='r')
plt.show()
action = raw_input('Play action')
if action == 'y':
focus = [mean[1], mean[0], yangle]
logTimeStep(d, psm1, 0, filename, [], [], False, False, focus)
break
else:
exit()
#pick based on image heuristic
yangle = 0.5*np.arctan(V[1,minor]/V[0,minor])
import numpy as np
from ..modflow import Modflow
from .util_array import Util2d, Util3d
class Lgr(object):
def __init__(self, nlayp, nrowp, ncolp, delrp, delcp, topp, botmp,
idomainp, ncpp=3, ncppl=1, xllp=0., yllp=0.):
"""
Parameters
----------
parent : flopy.modflow.Modflow
parent model
nlayp : int
parent layers
nrowp : int
parent number of rows
ncolp : int
parent number of columns
delrp : ndarray
parent delr array
delcp : ndarray
parent delc array
topp : ndarray
parent top array (nrowp, ncolp)
botmp : ndarray
parent botm array (nlayp, nrowp, ncolp)
idomainp : ndarray
parent idomain array used to create the child grid. Ones indicate
a parent cell and zeros indicate a child cell. The domain of the
child grid will span a rectangular region that spans all idomain
cells with a value of zero. idomain must be of shape
(nlayp, nrowp, ncolp)
ncpp : int
number of child cells along the face of a parent cell
ncppl : list of ints
number of child layers per parent layer
xllp : float
x location of parent grid lower left corner
yllp : float
y location of parent grid lower left corner
"""
# parent grid properties
self.nlayp = nlayp
self.nrowp = nrowp
self.ncolp = ncolp
m = Modflow()
self.delrp = Util2d(m, (ncolp,), np.float32, delrp, 'delrp').array
self.delcp = Util2d(m, (nrowp,), np.float32, delcp, 'delcp').array
self.topp = Util2d(m, (nrowp, ncolp), np.float32, topp, 'topp').array
self.botmp = Util3d(m, (nlayp, nrowp, ncolp), np.float32, botmp,
'botmp').array
# idomain
assert idomainp.shape == (nlayp, nrowp, ncolp)
self.idomain = idomainp
idxl, idxr, idxc = np.where(idomainp == 0)
assert idxl.shape[0] > 1, 'no zero values found in idomain'
# # child cells per parent and child cells per parent layer
self.ncpp = ncpp
self.ncppl = Util2d(m, (nlayp,), np.int, ncppl, 'ncppl').array
# parent lower left
self.xllp = xllp
self.yllp = yllp
# child grid properties
self.nplbeg = idxl.min()
self.nplend = idxl.max()
self.npcbeg = idxc.min()
self.npcend = idxc.max()
self.nprbeg = idxr.min()
self.nprend = idxr.max()
# child grid dimensions
self.nlay = self.ncppl.sum()
self.nrow = (self.nprend - self.nprbeg + 1) * ncpp
self.ncol = (self.npcend - self.npcbeg + 1) * ncpp
# assign child properties
self.delr, self.delc = self.get_delr_delc()
self.top, self.botm = self.get_top_botm()
self.xll = xllp + self.delrp[0: self.npcbeg].sum()
self.yll = yllp + self.delcp[self.nprend + 1:].sum()
return
def get_shape(self):
"""
Return the shape of the child grid
Returns
-------
(nlay, nrow, ncol) : tuple
shape of the child grid
"""
return self.nlay, self.nrow, self.ncol
def get_lower_left(self):
"""
Return the lower left corner of the child grid
Returns
-------
(xll, yll) : tuple
location of lower left corner of the child grid
"""
return self.xll, self.yll
def get_delr_delc(self):
# create the delr and delc arrays for this child grid
delr = np.zeros((self.ncol), dtype=float)
delc = np.zeros((self.nrow), dtype=float)
jstart = 0
jend = self.ncpp
for j in range(self.npcbeg, self.npcend + 1):
delr[jstart: jend] = self.delrp[j - 1] / self.ncpp
jstart = jend
jend = jstart + self.ncpp
istart = 0
iend = self.ncpp
for i in range(self.nprbeg, self.nprend + 1):
delc[istart: iend] = self.delcp[i - 1] / self.ncpp
istart = iend
iend = istart + self.ncpp
return delr, delc
def get_top_botm(self):
bt = self.botmp
tp = self.topp
shp = tp.shape
tp = tp.reshape(1, shp[0], shp[1])
pbotm = np.vstack((tp, bt))
botm = np.zeros((self.nlay + 1, self.nrow, self.ncol), dtype=float)
for ip in range(self.nprbeg, self.nprend + 1):
for jp in range(self.npcbeg, self.npcend + 1):
top = pbotm[0, ip, jp]
icrowstart = (ip - self.nprbeg) * self.ncpp
icrowend = icrowstart + self.ncpp
iccolstart = (jp - self.npcbeg) * self.ncpp
iccolend = iccolstart + self.ncpp
botm[0, icrowstart:icrowend, iccolstart:iccolend] = top
kc = 1
for kp in range(self.nplbeg, self.nplend + 1):
top = pbotm[kp, ip, jp]
bot = pbotm[kp + 1, ip, jp]
dz = (top - bot) / self.ncppl[kp - 1]
for _ in range(self.ncppl[kp - 1]):
botm[kc, icrowstart:icrowend,
iccolstart: iccolend] = botm[kc - 1,
icrowstart:icrowend,
iccolstart: iccolend] - dz
kc += 1
return botm[0], botm[1:]
def get_replicated_parent_array(self, parent_array):
"""
Get a two-dimensional array the size of the child grid that has values
replicated from the provided parent array.
Parameters
----------
parent_array : ndarray
A two-dimensional array that is the size of the parent model rows
and columns.
Returns
-------
child_array : ndarray
A two-dimensional array that is the size of the child model rows
and columns
"""
assert parent_array.shape == (self.nrowp, self.ncolp)
child_array = np.empty((self.nrow, self.ncol),
dtype=parent_array.dtype)
for ip in range(self.nprbeg, self.nprend + 1):
for jp in range(self.npcbeg, self.npcend + 1):
icrowstart = (ip - self.nprbeg) * self.ncpp
icrowend = icrowstart + self.ncpp
iccolstart = (jp - self.npcbeg) * self.ncpp
iccolend = iccolstart + self.ncpp
value = parent_array[ip, jp]
child_array[icrowstart:icrowend, iccolstart:iccolend] = value
return child_array
def get_idomain(self):
"""
Return the idomain array for the child model. This will normally
be all ones unless the idomain array for the parent model is
non-rectangular and irregularly shaped. Then, parts of the child
model will have idomain zero cells.
Returns
-------
idomain : ndarray
idomain array for the child model
"""
idomain = np.ones((self.nlay, self.nrow, self.ncol), dtype=np.int)
for kc in range(self.nlay):
for ic in range(self.nrow):
for jc in range(self.ncol):
kp, ip, jp = self.get_parent_indices(kc, ic, jc)
if self.idomain[kp, ip, jp] == 1:
idomain[kc, ic, jc] = 0
return idomain
def get_parent_indices(self, kc, ic, jc):
"""
Method returns the parent cell indices for this child.
The returned indices are in zero-based indexing.
"""
ip = self.nprbeg + int(ic / self.ncpp)
jp = self.npcbeg + int(jc / self.ncpp)
kp = 0
kcstart = 0
for k in range(self.nplbeg, self.nplend + 1):
kcend = kcstart + self.ncppl[k] - 1
if kcstart <= kc <= kcend:
kp = k
break
kcstart = kcend + 1
return kp, ip, jp
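# Worked example (sketch): with ncpp = 3, nprbeg = 2 and npcbeg = 5, child cell
# (kc=0, ic=7, jc=4) maps to parent row 2 + 7 // 3 = 4 and parent column
# 5 + 4 // 3 = 6, i.e. get_parent_indices(0, 7, 4) returns (kp, 4, 6) with kp
# determined by which parent layer contains child layer 0.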
def get_parent_connections(self, kc, ic, jc):
"""
Return a list of parent cell indices that are connected to child
cell kc, ic, jc.
"""
assert 0 <= kc < self.nlay, 'layer must be >= 0 and < child nlay'
assert 0 <= ic < self.nrow, 'layer must be >= 0 and < child nrow'
assert 0 <= jc < self.ncol, 'layer must be >= 0 and < child ncol'
parentlist = []
(kp, ip, jp) = self.get_parent_indices(kc, ic, jc)
# parent cell to left
if jc % self.ncpp == 0:
if jp - 1 >= 0:
if self.idomain[kp, ip, jp - 1] != 0:
parentlist.append(((kp, ip, jp - 1), -1))
# parent cell to right
if (jc + 1) % self.ncpp == 0:
if jp + 1 < self.ncolp:
if self.idomain[kp, ip, jp + 1] != 0:
parentlist.append(((kp, ip, jp + 1), 1))
# parent cell to back
if ic % self.ncpp == 0:
if ip - 1 >= 0:
if self.idomain[kp, ip - 1, jp] != 0:
parentlist.append(((kp, ip - 1, jp), 2))
# parent cell to front
if (ic + 1) % self.ncpp == 0:
if ip + 1 < self.nrowp:
if self.idomain[kp, ip + 1, jp] != 0:
parentlist.append(((kp, ip + 1, jp), -2))
# parent cell to top is not possible
# parent cell to bottom
if kc + 1 == self.ncppl[kp]:
if kp + 1 < self.nlayp:
if self.idomain[kp + 1, ip, jp] != 0:
parentlist.append(((kp + 1, ip, jp), -3))
return parentlist
def get_exchange_data(self, angldegx=False, cdist=False):
"""
Get the list of parent/child connections
<cellidm1> <cellidm2> <ihc> <cl1> <cl2> <hwva> <angledegx>
Returns
-------
exglist : list
list of connections between parent and child
"""
exglist = []
nlayc = self.nlay
nrowc = self.nrow
ncolc = self.ncol
delrc = self.delr
delcc = self.delc
delrp = self.delrp
delcp = self.delcp
topp = self.topp
botp = self.botmp
topc = self.top
botc = self.botm
if cdist:
# child xy meshgrid
xc = np.add.accumulate(delrc) - 0.5 * delrc
Ly = np.add.reduce(delcc)
yc = Ly - (np.add.accumulate(delcc) - 0.5 * delcc)
xc += self.xll
yc += self.yll
xc, yc = np.meshgrid(xc, yc)
# parent xy meshgrid
xp = np.add.accumulate(delrp) - 0.5 * delrp
Ly = np.add.reduce(delcp)
yp = Ly - (np.add.accumulate(delcp) - 0.5 * delcp)
xp += self.xllp
yp += self.yllp
xp, yp = np.meshgrid(xp, yp)
cidomain = self.get_idomain()
for kc in range(nlayc):
for ic in range(nrowc):
for jc in range(ncolc):
plist = self.get_parent_connections(kc, ic, jc)
for (kp, ip, jp), idir in plist:
if cidomain[kc, ic, jc] == 0:
continue
# horizontal or vertical connection
ihc = 1
if self.ncppl[kp] > 1:
ihc = 2
if abs(idir) == 3:
ihc = 0
# angldegx
angle = None
if angldegx:
angle = 180. # -x, west
if idir == 2:
angle = 270. # -y, south
elif idir == -1:
angle = 0. # +x, east
elif idir == -2:
angle = 90. # +y, north
# vertical connection
cl1 = None
cl2 = None
hwva = None
tpp = topp[ip, jp]
btp = botp[kp, ip, jp]
if kp > 0:
tpp = botp[kp - 1, ip, jp]
tpc = topc[ic, jc]
btc = botc[kc, ic, jc]
if kc > 0:
tpc = botc[kc - 1, ic, jc]
if ihc == 0:
cl1 = 0.5 * (tpp - btp)
cl2 = 0.5 * (tpc - btc)
hwva = delrc[jc] * delcc[ic]
else:
if abs(idir) == 1:
cl1 = 0.5 * delrp[jp]
cl2 = 0.5 * delrc[jc]
hwva = delcc[ic]
elif abs(idir) == 2:
cl1 = 0.5 * delcp[ip]
cl2 = 0.5 * delcc[ic]
hwva = delrc[jc]
# connection distance
cd = None
if cdist:
if abs(idir) == 3:
cd = cl1 + cl2
else:
x1 = xc[ic, jc]
y1 = yc[ic, jc]
x2 = xp[ip, jp]
y2 = yp[ip, jp]
cd = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
"""
Author: <NAME>
Collects training and testing data from various sources and transforms them in the required format
"""
import numpy as np
np.random.seed(1337)
import scipy.io as sio
import os
import sys
import random
random.seed(1337)
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
# splitting in training and testing
from sklearn.model_selection import train_test_split
# dataset imports
from tensorflow.keras.datasets import mnist
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.datasets import boston_housing
def shuffle_in_unison_inplace(a, b):
"""
Shuffle the arrays randomly
a: array to shuffle in the same way as b
b: array to shuffle in the same way as a
taken from:
https://github.com/artemyk/ibsgd/blob/master/utils.py
"""
assert len(a) == len(b)
p = np.random.permutation(len(a))
return a[p], b[p]
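# Usage sketch: a single permutation index is applied to both arrays, so the
# pairing between samples and labels is preserved after shuffling.
#   a = np.arange(5)
#   b = np.arange(5) * 10
#   a2, b2 = shuffle_in_unison_inplace(a, b)
#   assert np.all(a2 * 10 == b2)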
def load_data(filename, random_labels=False):
"""
Load the data
filename: the name of the dataset
random_labels: True if we want to assign random labels to the dataset
returns: an object with data and labels attributes
(the "Opening the Black Box" dataset used in the IDNNs experiments)
taken and adapted from:
https://github.com/ravidziv/IDNNs
"""
C = type('type_C', (object,), {})
data_sets = C()
d = sio.loadmat("../Data/"+ filename + '.mat')
F = d['F']
y = d['y']
C = type('type_C', (object,), {})
data_sets = C()
data_sets.data = F
data_sets.labels = np.squeeze(np.concatenate((y[None, :], 1 - y[None, :]), axis=0).T)
# If we want to assign random labels to the data
if random_labels:
labels = np.zeros(data_sets.labels.shape)
labels_index = np.random.randint(low=0, high=labels.shape[1], size=labels.shape[0])
from builtins import *
import random
import sys
import os
import math
from abc import ABCMeta, abstractmethod
from collections import defaultdict, Counter
from functools import partial
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from .base_tabular_learner import Agent, StationaryAgent
import importlib
import maci.utils as utils
from copy import deepcopy
class BaseQAgent(Agent):
def __init__(self, name, id_, action_num, env, alpha_decay_steps=10000., alpha=0.01, gamma=0.95, episilon=0.1, verbose=True, **kwargs):
super().__init__(name, id_, action_num, env, **kwargs)
self.episilon = episilon
self.alpha_decay_steps = alpha_decay_steps
self.gamma = gamma
self.alpha = alpha
self.epoch = 0
self.Q = None
self.pi = defaultdict(partial(np.random.dirichlet, [1.0] * self.action_num))
self.record = defaultdict(list)
self.verbose = verbose
self.pi_history = [deepcopy(self.pi)]
def done(self, env):
if self.verbose:
utils.pv('self.full_name(game)')
utils.pv('self.Q')
utils.pv('self.pi')
numplots = env.numplots if env.numplots >= 0 else len(self.record)
for s, record in sorted(
self.record.items(), key=lambda x: -len(x[1]))[:numplots]:
self.plot_record(s, record, env)
self.record.clear()
# learning rate decay
def step_decay(self):
# drop = 0.5
# epochs_drop = 10000
# decay_alpha = self.alpha * math.pow(drop, math.floor((1 + self.epoch) / epochs_drop))
# return 1 / (1 / self.alpha + self.epoch * 1e-4)
return self.alpha_decay_steps / (self.alpha_decay_steps + self.epoch)
# return decay_alpha
# def alpha(self, t):
# return self.alpha_decay_steps / (self.alpha_decay_steps + t)
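# Worked example of the decay schedule above: with alpha_decay_steps = 10000 the
# effective learning rate is 1.0 at epoch 0, 0.5 at epoch 10000, and 0.1 at
# epoch 90000, i.e. it decays hyperbolically in the number of updates.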
def act(self, s, exploration, game):
if exploration and random.random() < self.episilon:
return random.randint(0, self.action_num - 1)
else:
if self.verbose:
for s in self.Q.keys():
print('{}--------------'.format(self.id_))
print('Q of agent {}: state {}: {}'.format(self.id_, s, str(self.Q[s])))
# print('QAof agent {}: state {}: {}'.format(self.id_, s, str(self.Q_A[s])))
# self.Q_A
print('pi of agent {}: state {}: {}'.format(self.id_, s, self.pi[s]))
# print('pi of opponent agent {}: state{}: {}'.format(self.id_, s, self.opponent_best_pi[s]))
print('{}--------------'.format(self.id_))
# print()
return StationaryAgent.sample(self.pi[s])
@abstractmethod
def update(self, s, a, o, r, s2, env, done=False):
pass
@abstractmethod
def update_policy(self, s, a, env):
pass
def plot_record(self, s, record, env):
os.makedirs('policy/', exist_ok=True)
fig = plt.figure(figsize=(18, 10))
n = self.action_num
for a in range(n):
plt.subplot(n, 1, a + 1)
plt.tight_layout()
plt.gca().set_ylim([-0.05, 1.05])
plt.gca().set_xlim([1.0, env.t + 1.0])
plt.title('player: {}: state: {}, action: {}'.format(self.full_name(env), s, a))
plt.xlabel('step')
plt.ylabel('pi[a]')
plt.grid()
x, y = list(zip(*((t, pi[a]) for t, pi in record)))
x, y = list(x) + [env.t + 1.0], list(y) + [y[-1]]
plt.plot(x, y, 'r-')
fig.savefig('policy/{}_{}.pdf'.format(self.full_name(env), s))
plt.close(fig)
def record_policy(self, s, env):
pass
# if env.numplots != 0:
# if s in self.record:
# self.record[s].append((env.t - 0.01, self.record[s][-1][1]))
# self.record[s].append((env.t, np.copy(self.pi[s])))
class QAgent(BaseQAgent):
def __init__(self, id_, action_num, env, **kwargs):
super().__init__('q', id_, action_num, env, **kwargs)
self.Q = defaultdict(partial(np.random.rand, self.action_num))
self.R = defaultdict(partial(np.zeros, self.action_num))
self.count_R = defaultdict(partial(np.zeros, self.action_num))
def done(self, env):
self.R.clear()
self.count_R.clear()
super().done(env)
def update(self, s, a, o, r, s2, env, done=False):
self.count_R[s][a] += 1.0
self.R[s][a] += (r - self.R[s][a]) / self.count_R[s][a]
Q = self.Q[s]
V = self.val(s2)
decay_alpha = self.step_decay()
if done:
Q[a] = Q[a] + decay_alpha * (r - Q[a])
else:
Q[a] = Q[a] + decay_alpha * (r + self.gamma * V - Q[a])
if self.verbose:
print(self.epoch)
self.update_policy(s, a, env)
self.record_policy(s, env)
self.epoch += 1
def val(self, s):
return np.max(self.Q[s])
def update_policy(self, s, a, env):
Q = self.Q[s]
self.pi[s] = (Q == np.max(Q)).astype(np.double)
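# Note on the greedy policy above (worked example): (Q == np.max(Q)) yields an
# indicator vector over the argmax actions, e.g. Q = [0.2, 0.9, 0.9] gives
# pi[s] = [0., 1., 1.]; ties therefore leave more than one entry set to 1 and
# the vector is not renormalized here.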
class PGAAPPAgent(QAgent):
def __init__(self, id_, action_num, env, eta=0.01, **kwargs):
super().__init__(id_, action_num, env, **kwargs)
self.name = 'pga-app'
self.eta = eta
self.pi_history = [deepcopy(self.pi)]
def update_policy(self, s, a, game):
V = np.dot(self.pi[s], self.Q[s])
delta_hat_A = np.zeros(self.action_num)
delta_A = np.zeros(self.action_num)
for ai in range(self.action_num):
if self.pi[s][ai] == 1:
delta_hat_A[ai]= self.Q[s][ai] - V
else:
delta_hat_A[ai] = (self.Q[s][ai] - V) / (1 - self.pi[s][ai])
delta_A[ai] = delta_hat_A[ai] - self.gamma * abs(delta_hat_A[ai]) *self.pi[s][ai]
self.pi[s] += self.eta * delta_A
StationaryAgent.normalize(self.pi[s])
self.pi_history.append(deepcopy(self.pi))
class GIGAWoLFAgent(QAgent):
def __init__(self, id_, action_num, env, eta=0.01, **kwargs):
super().__init__(id_, action_num, env, **kwargs)
self.name = 'giga-wolf'
self.eta = eta
self.pi_history = [deepcopy(self.pi)]
def update_policy(self, s, a, game):
V = np.dot(self.pi[s], self.Q[s])
delta_hat_A = np.zeros(self.action_num)
delta_A = np.zeros(self.action_num)
for ai in range(self.action_num):
if self.pi[s][ai] == 1:
delta_hat_A[ai]= self.Q[s][ai] - V
else:
delta_hat_A[ai] = (self.Q[s][ai] - V) / (1 - self.pi[s][ai])
delta_A[ai] = delta_hat_A[ai] - self.gamma * abs(delta_hat_A[ai]) *self.pi[s][ai]
self.pi[s] += self.eta * delta_A
StationaryAgent.normalize(self.pi[s])
self.pi_history.append(deepcopy(self.pi))
class EMAQAgent(QAgent):
def __init__(self, id_, action_num, env, delta1=0.001, delta2=0.002, **kwargs):
super().__init__(id_, action_num, env, **kwargs)
self.name = 'emaq'
self.delta1 = delta1
self.delta2 = delta2
self.pi_history = [deepcopy(self.pi)]
def update_policy(self, s, a, game):
if a == np.argmax(self.Q[s]):
delta = self.delta1
vi = np.zeros(self.action_num)
vi[a] = 1.
else:
delta = self.delta2
vi = np.zeros(self.action_num)
vi[a] = 0.
self.pi[s] = (1 - delta) * self.pi[s] + delta * vi
StationaryAgent.normalize(self.pi[s])
self.pi_history.append(deepcopy(self.pi))
class OMQAgent(QAgent):
def __init__(self, id_, action_num, env, **kwargs):
super().__init__(id_, action_num, env, **kwargs)
self.name = 'omq'
self.count_SO = defaultdict(partial(np.zeros, self.action_num))
self.opponent_pi = defaultdict(partial(np.random.dirichlet, [1.0] * self.action_num))
self.pi_history = [deepcopy(self.pi)]
self.opponent_pi_history = [deepcopy(self.opponent_pi)]
self.Q = defaultdict(partial(np.random.rand, *(self.action_num, self.action_num)))
self.R = defaultdict(partial(np.zeros, (self.action_num, self.action_num)))
self.count_R = defaultdict(partial(np.zeros, (self.action_num, self.action_num)))
def update(self, s, a, o, r, s2, env, done=False):
self.count_SO[s][o] += 1.
self.opponent_pi[s] = self.count_SO[s] / np.sum(self.count_SO[s])
self.count_R[s][a][o] += 1.0
self.R[s][a][o] += (r - self.R[s][a][o]) / self.count_R[s][a][o]
Q = self.Q[s]
V = self.val(s2)
decay_alpha = self.step_decay()
if done:
Q[a][o] = Q[a][o] + decay_alpha * (r - Q[a][o])
else:
Q[a][o] = Q[a][o] + decay_alpha * (r + self.gamma * V - Q[a][o])
if self.verbose:
print(self.epoch)
self.update_policy(s, a, env)
self.record_policy(s, env)
self.epoch += 1
def val(self, s):
return np.max(np.dot(self.Q[s], self.opponent_pi[s]))
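# Note (sketch): this state value is the best response against the empirical
# opponent policy, max_a sum_o Q[s][a][o] * opponent_pi[s][o], where
# opponent_pi[s] is the normalized count of opponent actions observed in s.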
def update_policy(self, s, a, game):
# print('Qs {}'.format(self.Q[s]))
# print('OPI {}'.format(self.opponent_best_pi[s]))
# print('pis: ' + str(np.dot(self.Q[s], self.opponent_best_pi[s])))
self.pi[s] = utils.softmax(np.dot(self.Q[s], self.opponent_pi[s]))
# print('pis: ' + str(np.sum(np.dot(self.Q[s], self.opponent_best_pi[s]))))
self.pi_history.append(deepcopy(self.pi))
self.opponent_pi_history.append(deepcopy(self.opponent_pi))
if self.verbose:
print('opponent pi of {}: {}'.format(self.id_, self.opponent_pi[s]))
class RRQAgent(QAgent):
def __init__(self, id_, action_num, env, phi_type='count', a_policy='softmax', **kwargs):
super().__init__(id_, action_num, env, **kwargs)
self.name = 'RR2Q'
self.phi_type = phi_type
self.a_policy = a_policy
self.count_AOS = defaultdict(partial(np.zeros, (self.action_num, self.action_num)))
self.count_OS = defaultdict(partial(np.zeros, (self.action_num, )))
self.opponent_best_pi = defaultdict(partial(np.random.dirichlet, [1.0] * self.action_num))
self.pi_history = [deepcopy(self.pi)]
self.opponent_best_pi_history = [deepcopy(self.opponent_best_pi)]
self.Q = defaultdict(partial(np.random.rand, *(self.action_num, self.action_num)))
self.Q_A = defaultdict(partial(np.random.rand, self.action_num))
self.R = defaultdict(partial(np.zeros, (self.action_num, self.action_num)))
self.count_R = defaultdict(partial(np.zeros, (self.action_num, self.action_num)))
def update(self, s, a, o, r, s2, env, done=False, tau=0.5):
self.count_AOS[s][a][o] += 1.0
self.count_OS[s][o] += 1.
decay_alpha = self.step_decay()
if self.phi_type == 'count':
count_sum = np.reshape(np.repeat(np.sum(self.count_AOS[s], 1), self.action_num), (self.action_num, self.action_num))
self.opponent_best_pi[s] = self.count_AOS[s] / (count_sum + 0.1)
self.opponent_best_pi[s] = self.opponent_best_pi[s] / (np.sum(self.opponent_best_pi[s]) + 0.1)
elif self.phi_type == 'norm-exp':
self.Q_A_reshaped = np.reshape(np.repeat(self.Q_A[s], self.action_num), (self.action_num, self.action_num))
self.opponent_best_pi[s] = np.log(np.exp((self.Q[s] - self.Q_A_reshaped)))
self.opponent_best_pi[s] = self.opponent_best_pi[s] / np.reshape(
np.repeat(np.sum(self.opponent_best_pi[s], 1), self.action_num), (self.action_num, self.action_num))
self.count_R[s][a][o] += 1.0
self.R[s][a][o] += (r - self.R[s][a][o]) / self.count_R[s][a][o]
Q = self.Q[s]
V = self.val(s2)
if done:
Q[a][o] = Q[a][o] + decay_alpha * (r - Q[a][o])
self.Q_A[s][a] = self.Q_A[s][a] + decay_alpha * (r - self.Q_A[s][a])
else:
Q[a][o] = Q[a][o] + decay_alpha * (r + self.gamma * V - Q[a][o])
self.Q_A[s][a] = self.Q_A[s][a] + decay_alpha * (r + self.gamma * V - self.Q_A[s][a])
if self.verbose:
print(self.epoch)
self.update_policy(s, a, env)
self.record_policy(s, env)
self.epoch += 1
def val(self, s):
return np.max(np.sum(np.multiply(self.Q[s], self.opponent_best_pi[s]), 1))
def update_policy(self, s, a, game):
if self.a_policy == 'softmax':
self.pi[s] = utils.softmax(np.sum(np.multiply(self.Q[s], self.opponent_best_pi[s]), 1))
else:
Q = np.sum(np.multiply(self.Q[s], self.opponent_best_pi[s]), 1)
self.pi[s] = (Q == np.max(Q)).astype(np.double)
self.pi_history.append(deepcopy(self.pi))
self.opponent_best_pi_history.append(deepcopy(self.opponent_best_pi))
if self.verbose:
print('opponent pi of {}: {}'.format(self.id_, self.opponent_best_pi))
class GRRQAgent(QAgent):
def __init__(self, id_, action_num, env, k=0, phi_type='count', a_policy='softmax', **kwargs):
super().__init__(id_, action_num, env, **kwargs)
self.name = 'GRRQ'
self.k = k
self.phi_type = phi_type
self.a_policy = a_policy
self.count_AOS = defaultdict(partial(np.zeros, (self.action_num, self.action_num)))
self.opponent_best_pi = defaultdict(partial(np.random.dirichlet, [1.0] * self.action_num))
self.pi_history = [deepcopy(self.pi)]
self.opponent_best_pi_history = [deepcopy(self.opponent_best_pi)]
self.Q = defaultdict(partial(np.random.rand, *(self.action_num, self.action_num)))
self.Q_A = defaultdict(partial(np.random.rand, self.action_num))
self.R = defaultdict(partial(np.zeros, (self.action_num, self.action_num)))
self.count_R = defaultdict(partial(np.zeros, (self.action_num, self.action_num)))
def update(self, s, a, o, r, s2, env, done=False):
self.count_AOS[s][a][o] += 1.0
decay_alpha = self.step_decay()
if self.phi_type == 'count':
count_sum = np.reshape(np.repeat(np.sum(self.count_AOS[s], 1), self.action_num), (self.action_num, self.action_num))
self.opponent_best_pi[s] = self.count_AOS[s] / (count_sum + 0.1)
elif self.phi_type == 'norm-exp':
self.Q_A_reshaped = np.reshape(np.repeat(self.Q_A[s], self.action_num), (self.action_num, self.action_num))
self.opponent_best_pi[s] = np.log(np.exp(self.Q[s] - self.Q_A_reshaped))
self.opponent_best_pi[s] = self.opponent_best_pi[s] / np.reshape(
np.repeat(np.sum(self.opponent_best_pi[s], 1), self.action_num), (self.action_num, self.action_num))
self.count_R[s][a][o] += 1.0
self.R[s][a][o] += (r - self.R[s][a][o]) / self.count_R[s][a][o]
Q = self.Q[s]
V = self.val(s2)
if done:
Q[a][o] = Q[a][o] + decay_alpha * (r - Q[a][o])
self.Q_A[s][a] = self.Q_A[s][a] + decay_alpha * (r - self.Q_A[s][a])
else:
Q[a][o] = Q[a][o] + decay_alpha * (r + self.gamma * V - Q[a][o])
self.Q_A[s][a] = self.Q_A[s][a] + decay_alpha * (r + self.gamma * V - self.Q_A[s][a])
print(self.epoch)
self.update_policy(s, a, env)
self.record_policy(s, env)
self.epoch += 1
def val(self, s):
return np.max(np.sum(np.multiply(self.Q[s], self.opponent_best_pi[s]), 1))
# stdlib imports
from configobj import ConfigObj
import os
import shutil
import numpy as np
import tempfile
import urllib
import re
import json
from argparse import Namespace
from zipfile import ZipFile
import warnings
# local imports
from mapio.shake import getHeaderData
from mapio.gdal import GDALGrid
from mapio.gmt import GMTGrid
from mapio.geodict import GeoDict
from impactutils.io.cmd import get_command_output
from mapio.shake import ShakeGrid
from gfail.conf import correct_config_filepaths
import gfail.logisticmodel as LM
from gfail.godt import godt2008
from gfail.webpage import hazdev, create_kmz
from gfail.utilities import (
get_event_comcat, parseConfigLayers,
text_to_json, savelayers, getFileType)
from libcomcat.search import get_event_by_id
def run_gfail(args):
"""Runs ground failure.
Args:
args: dictionary or argument parser Namespace output by bin/gfail
program.
Returns:
list: Names of created files.
"""
# TODO: ADD CONFIG VALIDATION STEP THAT MAKES SURE ALL THE FILES EXIST
filenames = []
# If args is a dictionary, convert to a Namespace
if isinstance(args, dict):
args = Namespace(**args)
if args.set_default_paths:
set_default_paths(args)
print('default paths set, continuing...\n')
if args.list_default_paths:
list_default_paths()
return
if args.reset_default_paths:
reset_default_paths()
return
if args.make_webpage:
# Turn on GIS and HDF5 flags
gis = True
hdf5 = True
kmz = True
else:
gis = args.gis
hdf5 = args.hdf5
kmz = args.kmz
# Figure out what models will be run
if args.shakefile is not None: # user intends to actually run some models
shakefile = args.shakefile
# make output location for things
if args.output_filepath is None:
outdir = os.getcwd()
else:
outdir = args.output_filepath
if hdf5 or gis or kmz:
if not os.path.exists(outdir):
os.makedirs(outdir)
# download if is url
# cleanup = False
if not os.path.isfile(shakefile):
if isURL(shakefile):
# getGridURL returns a named temporary file object
shakefile = getGridURL(shakefile)
# cleanup = True # Be sure to delete it after
else:
raise NameError('Could not find "%s" as a file or a valid url'
% shakefile)
eventid = getHeaderData(shakefile)[0]['event_id']
# Get entire path so won't break if running gfail with relative path
shakefile = os.path.abspath(shakefile)
if args.extract_contents:
outfolder = outdir
else: # Nest in a folder named by eventid
outfolder = os.path.join(outdir, eventid)
if not os.path.exists(outfolder):
os.makedirs(outfolder)
# Copy shake grid into output directory
# --- this is based on advice from Mike that when running in production
# the shake grids are not archived, so if we need/want to have
# the exact grid used for the calculation later, in case there's ever a
# question about how the calculation was done, the safest thing is
# to store a copy of it here.
shake_copy = os.path.join(outfolder, "grid.xml")
shutil.copyfile(shakefile, shake_copy)
if args.uncertfile is not None:
uncertfile = os.path.abspath(args.uncertfile)
unc_copy = os.path.join(outfolder, "uncertainty.xml")
shutil.copyfile(uncertfile, unc_copy)
else:
uncertfile = None
# Write shakefile to a file for use later
shakename = os.path.join(outfolder, "shakefile.txt")
shake_file = open(shakename, "wt")
shake_file.write(shake_copy)
shake_file.close()
filenames.append(shakename)
# Check that shakemap bounds do not cross 180/-180 line
if args.set_bounds is None:
sd = ShakeGrid.getFileGeoDict(shakefile)
if sd.xmin > sd.xmax:
print('\nShakeMap crosses 180/-180 line, setting bounds so '
'only side with more land area is run')
if sd.xmax + 180. > 180 - sd.xmin:
set_bounds = '%s, %s, %s, %s' % (
sd.ymin, sd.ymax, -180., sd.xmax)
else:
set_bounds = '%s, %s, %s, %s' % (sd.ymin, sd.ymax, sd.xmin,
180.)
print('Bounds applied: %s' % set_bounds)
else:
set_bounds = args.set_bounds
else:
set_bounds = args.set_bounds
config = args.config
if args.config_filepath is not None:
# only add config_filepath if full filepath not given and file
# ext is .ini
if (not os.path.isabs(config) and
os.path.splitext(config)[-1] == '.ini'):
config = os.path.join(args.config_filepath, config)
if os.path.splitext(config)[-1] == '.ini':
temp = ConfigObj(config)
if len(temp) == 0:
raise Exception(
'Could not find specified .ini file: %s' % config)
if args.data_path is not None:
temp = correct_config_filepaths(args.data_path, temp)
configs = [temp]
conffail = []
else:
# input is a list of config files
f = open(config, 'r')
configlist = f.readlines()
configs = []
conffail = []
for conf in configlist:
conf = conf.strip()
if not os.path.isabs(conf):
# only add config_filepath if full filepath not given
conf = os.path.join(args.config_filepath, conf)
try:
temp = ConfigObj(conf)
if temp:
if args.data_path is not None:
temp = correct_config_filepaths(
args.data_path, temp)
configs.append(temp)
else:
conffail.append(conf)
except BaseException:
conffail.append(conf)
print('\nRunning the following models:')
for conf in configs:
print('\t%s' % conf.keys()[0])
if len(conffail) > 0:
print('Could not find or read in the following config files:\n')
for conf in conffail:
print('\t%s' % conf)
print('\nContinuing...\n')
if set_bounds is not None:
if 'zoom' in set_bounds:
temp = set_bounds.split(',')
print('Using %s threshold of %1.1f to cut model bounds'
% (temp[1].strip(), float(temp[2].strip())))
bounds = get_bounds(shakefile, temp[1].strip(),
float(temp[2].strip()))
else:
temp = eval(set_bounds)
latmin = temp[0]
latmax = temp[1]
lonmin = temp[2]
lonmax = temp[3]
bounds = {'xmin': lonmin, 'xmax': lonmax,
'ymin': latmin, 'ymax': latmax}
print('Applying bounds of lonmin %1.2f, lonmax %1.2f, '
'latmin %1.2f, latmax %1.2f'
% (bounds['xmin'], bounds['xmax'],
bounds['ymin'], bounds['ymax']))
else:
bounds = None
if args.make_webpage:
results = []
# pre-read in ocean trimming file polygons so only do this step once
if args.trimfile is not None:
if not os.path.exists(args.trimfile):
print('trimfile defined does not exist: %s\n'
'Ocean will not be trimmed.' % args.trimfile)
trimfile = None
elif os.path.splitext(args.trimfile)[1] != '.shp':
print('trimfile must be a shapefile, '
'ocean will not be trimmed')
trimfile = None
else:
trimfile = args.trimfile
else:
trimfile = None
# Get finite fault ready, if exists
ffault = None
point = True
if args.finite_fault is not None:
point = False
try:
if os.path.splitext(args.finite_fault)[-1] == '.txt':
ffault = text_to_json(args.finite_fault)
elif os.path.splitext(args.finite_fault)[-1] == '.json':
ffault = args.finite_fault
else:
print('Could not read in finite fault, will '
'try to download from comcat')
ffault = None
except BaseException:
print('Could not read in finite fault, will try to '
'download from comcat')
ffault = None
if ffault is None:
# Try to get finite fault file, if it exists
try:
returned_ev = get_event_comcat(shakefile)
if returned_ev is not None:
testjd, detail, temp = returned_ev
evinfo = testjd['input']['event_information']
if 'faultfiles' in evinfo:
ffilename = evinfo['faultfiles']
if len(ffilename) > 0:
# Download the file
with tempfile.NamedTemporaryFile(
delete=False, mode='w') as f:
temp.getContent(ffilename, filename=f.name)
ffault = text_to_json(f.name)
os.remove(f.name)
point = False
else:
point = True
else:
print('Unable to determine source type, unknown if finite'
' fault or point source')
ffault = None
point = False
except Exception as e:
print(e)
print('Unable to determine source type, unknown if finite'
' fault or point source')
ffault = None
point = False
# Loop over config files
for conf in configs:
modelname = conf.keys()[0]
print('\nNow running %s:' % modelname)
notcov, newbnds = check_input_extents(
conf, shakefile=shakefile,
bounds=bounds
)
if len(notcov) > 0:
print('\nThe following input layers do not cover'
' the area of interest:\n\t%s' % '\n\t'.join(notcov))
if newbnds is None:
print('\nCannot make bounds that work. '
'Skipping to next model\n')
continue
else:
pnt = '%s, %s, %s, %s' % (
newbnds['xmin'], newbnds['xmax'],
newbnds['ymin'], newbnds['ymax'])
print('Running model for new bounds that are fully covered'
' by input layer: %s' % pnt)
bounds2 = newbnds
else:
bounds2 = bounds
modelfunc = conf[modelname]['funcname']
if modelfunc == 'LogisticModel':
lm = LM.LogisticModel(shakefile, conf,
uncertfile=uncertfile,
saveinputs=args.save_inputs,
bounds=bounds2,
trimfile=trimfile)
maplayers = lm.calculate()
elif modelfunc == 'godt2008':
maplayers = godt2008(shakefile, conf,
uncertfile=uncertfile,
saveinputs=args.save_inputs,
bounds=bounds2,
trimfile=trimfile)
else:
print('Unknown model function specified in config for %s '
'model, skipping to next config' % modelfunc)
continue
# time1 = datetime.datetime.utcnow().strftime('%d%b%Y_%H%M')
# filename = ('%s_%s_%s' % (eventid, modelname, time1))
if args.appendname is not None:
filename = ('%s_%s_%s' % (eventid, modelname, args.appendname))
else:
filename = ('%s_%s' % (eventid, modelname))
if hdf5:
filenameh = filename + '.hdf5'
if os.path.exists(filenameh):
os.remove(filenameh)
savelayers(maplayers, os.path.join(outfolder, filenameh))
filenames.append(filenameh)
if gis or kmz:
for key in maplayers:
# Rename 'std' key to 'beta_sigma'
if key == 'std':
key_label = 'beta_sigma'
else:
key_label = key
if gis:
filen = os.path.join(outfolder, '%s_%s.bil'
% (filename, key_label))
fileh = os.path.join(outfolder, '%s_%s.hdr'
% (filename, key_label))
fileg = os.path.join(outfolder, '%s_%s.tif'
% (filename, key_label))
GDALGrid.copyFromGrid(
maplayers[key]['grid']).save(filen)
cflags = '-co COMPRESS=DEFLATE -co predictor=2'
srs = '-a_srs EPSG:4326'
cmd = 'gdal_translate %s %s -of GTiff %s %s' % (
srs, cflags, filen, fileg)
rc, so, se = get_command_output(cmd)
# Delete bil file and its header
os.remove(filen)
os.remove(fileh)
filenames.append(fileg)
if kmz and not key.startswith('quantile'):
plotorder, logscale, lims, colormaps, maskthresh = \
parseConfigLayers(maplayers, conf, keys=['model'])
maxprob = np.nanmax(maplayers[key]['grid'].getData())
if key == 'model':
qdict = {
k: maplayers[k] for k in maplayers.keys()
if k.startswith('quantile')
}
else:
qdict = None
if maskthresh is None:
maskthresh = [0.]
if maxprob >= maskthresh[0]:
filen = os.path.join(outfolder, '%s_%s.kmz'
% (filename, key_label))
filek = create_kmz(maplayers[key], filen,
mask=maskthresh[0],
levels=lims[0],
qdict=qdict)
filenames.append(filek)
else:
print('No unmasked pixels present, skipping kmz '
'file creation')
if args.make_webpage:
# Compile into list of results for later
results.append(maplayers)
# # Make binary output for ShakeCast
# filef = os.path.join(outfolder, '%s_model.flt'
# % filename)
# # And get name of header
# filefh = os.path.join(outfolder, '%s_model.hdr'
# % filename)
# # Make file
# write_floats(filef, maplayers['model']['grid'])
# filenames.append(filef)
# filenames.append(filefh)
eventid = getHeaderData(shakefile)[0]['event_id']
if not hasattr(args, 'eventsource'):
args.eventsource = 'us'
if not hasattr(args, 'eventsourcecode'):
args.eventsourcecode = eventid
if args.make_webpage:
if len(results) == 0:
raise Exception('No models were run. Cannot make webpages.')
outputs = hazdev(
results, configs,
shakefile, outfolder=outfolder,
pop_file=args.popfile,
pager_alert=args.property_alertlevel,
eventsource=args.eventsource,
eventsourcecode=args.eventsourcecode,
point=point, gf_version=args.gf_version,
pdlcall=args.pdlcall)
filenames = filenames + outputs
# # create transparent png file
# outputs = create_png(outdir)
# filenames = filenames + outputs
#
# # create info file
# infofile = create_info(outdir)
# filenames = filenames + infofile
print('\nFiles created:\n')
for filen in filenames:
print('%s' % filen)
return filenames
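# Note on invocation (sketch): run_gfail accepts either the argparse Namespace
# produced by bin/gfail or an equivalent dict, which is converted to a Namespace
# on entry; every attribute referenced above (shakefile, config, output_filepath,
# gis, hdf5, kmz, make_webpage, and the rest) must be present on whichever
# object is passed in.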
def getGridURL(gridurl, fname=None):
"""
Args:
gridurl (str): url for Shakemap grid.xml file.
fname (str): file location name, if None, will create a temporary file
Returns:
file object corresponding to the url.
"""
with urllib.request.urlopen(gridurl) as fh:
data = fh.read().decode('utf-8')
if fname is None:
with tempfile.NamedTemporaryFile(delete=False, mode='w') as f:
f.write(data)
else:
with open(fname, 'w') as f:
f.write(data)
return f.name
def getShakefiles(event, outdir, uncert=False, version=None,
source='preferred'):
"""
Download the shakemap grid.xml file and, if requested, the corresponding
uncertainty grid.
Args:
event: event id or URL
outdir: directory in which to save the downloaded grid(s)
uncert: if True, also download uncertainty.xml
version: ShakeMap version to fetch (None for the current preferred version)
source: ShakeMap source/originator (defaults to 'preferred')
"""
shakefile = os.path.join(outdir, 'grid.xml')
if uncert:
uncertfile = os.path.join(outdir, 'uncertainty.xml')
else:
uncertfile = None
if version is not None:
includeSuperseded = True
else:
includeSuperseded = False
# If args.event is a url to a shakemap, download from that url
if isURL(event):
if version is not None or source != 'preferred':
raise Exception('Cannot set shakemap version or source when URL '
'of gridfile is provided')
try:
shakefile = getGridURL(event, shakefile)
except Exception as e:
raise Exception('Could not download shakemap file from provided '
'URL: %s' % e)
# Now get corresponding event detail
event = getHeaderData(shakefile)[0]['event_id']
version = getHeaderData(shakefile)[0]['shakemap_version']
source = getHeaderData(shakefile)[0]['shakemap_originator']
try:
detail = get_event_by_id(
event, includesuperseded=includeSuperseded)
except BaseException:
# Maybe originator is missing from event id, try another way
try:
temp = getHeaderData(shakefile)[0]
temp2 = '%s%s' % (
temp['shakemap_originator'], temp['shakemap_id'])
detail = get_event_by_id(
temp2, includesuperseded=includeSuperseded)
event = temp2
except Exception as e:
msg = 'Could not get event detail for shakemap at provided URL: %s'
print(msg % e)
else:
detail = get_event_by_id(event, includesuperseded=includeSuperseded)
# Get most recent version
if version is None: # Get current preferred
shakemap = detail.getProducts('shakemap', source=source)[0]
shakemap.getContent('grid.xml', shakefile)
# or get version requested
else:
allversions = detail.getProducts('shakemap', version='all',
source=source)
# First try with properties, more reliable
vers = []
for allv in allversions:
if 'version' in allv.properties:
vers.append(int(allv['version']))
else:
vers.append(-999)
idx = np.where(np.array(vers) == version)[0]
if len(idx) < 1:
# Try using libcomcat version, less reliable...
vers = [allv.version for allv in allversions]
idx = np.where(np.array(vers) == version)[0]
if len(idx) == 1:
# Check info.json to make sure it's right version
infobytes, url = allversions[idx[0]
].getContentBytes('info.json')
info = json.loads(infobytes.decode('utf-8'))
if info['processing']['shakemap_versions']['map_version'] != version:
idx = []
if len(idx) < 1:
msg = 'Could not find version %d of Shakemap from source %s'
raise Exception(msg % (version, source))
if len(idx) > 1:
msg = 'Found more than one ShakeMap with matching source and version. \
Choosing first one.'
warnings.warn(msg)
print(msg)
shakemap = allversions[idx[0]]
shakemap.getContent('grid.xml', shakefile)
if uncert:
uncertfile = getUncert(shakemap, uncertfile)
return detail, shakefile, uncertfile
def getUncert(shakemap, fname=None):
"""
download and unzip (if needed) the uncertainty grid corresponding to a
shakemap
Args:
shakemap: libcomcat ShakeMap product class for the event and version
fname (str): file location name, if None, will create a temporary file
Returns:
file object corresponding to the url.
"""
grid_url = shakemap.getContentURL('uncertainty.*')
if grid_url is None:
return None
try:
ext = grid_url.split('.')[-1]
if fname is None:
basedir = tempfile.mkdtemp()
else:
basedir = os.path.dirname(fname)
uncertfile = os.path.join(basedir, 'uncertainty.xml')
if ext == 'xml':
shakemap.getContent('uncertainty.xml', uncertfile)
if ext == 'zip':
fname = os.path.join(basedir, 'uncertainty.xml.zip')
shakemap.getContent('uncertainty.xml.zip', fname)
# urllib.request.urlretrieve(grid_url, fname)
with ZipFile(fname, 'r') as zip1:
# See if it's inside a file structure
out = zip1.filelist[0]
# Extract all the contents of zip file in different directory
zip1.extractall(basedir)
# move file uncertainty.xml file to base dir if it was in a
# weird subdir
if os.path.isdir(os.path.dirname(out.filename)):
os.replace(os.path.join(basedir, out.filename), uncertfile)
except Exception as e:
uncertfile = None
print('Unable to download uncertainty.xml: %s' % e)
return uncertfile
def isURL(gridurl):
"""
This function determines if the provided string is a valid url
Args:
gridurl (str): url to check.
Returns:
bool: True if gridurl is a valid url, False otherwise.
"""
is_url = False
try:
urllib.request.urlopen(gridurl)
is_url = True
except BaseException:
pass
return is_url
def set_default_paths(args):
"""
Creates a file called .gfail_defaults that contains default path
information to simplify running gfail. Can be overwritten by any manually
entered paths. This updates any existing .gfail_defaults file. If
args.data_path is 'reset' then any existing defaults will be removed.
Args:
args (arparser Namespace): Input arguments.
Returns:
Updates .gfail_defaults file on users path, or creates new one if
file does not already exist.
"""
filename = os.path.join(os.path.expanduser('~'), '.gfail_defaults')
if os.path.exists(filename):
D = ConfigObj(filename)
else:
D = {}
if args.data_path is not None:
if args.data_path == 'reset':
D.pop('data_path')
else:
# check that it's a valid path
if os.path.exists(args.data_path):
D.update({'data_path': args.data_path})
else:
print('Path given for data_path does not exist: %s'
% args.data_path)
if args.output_filepath is not None:
if args.output_filepath == 'reset':
D.pop('output_filepath')
else:
# check that it's a valid path
if os.path.exists(args.output_filepath):
D.update({'output_filepath': args.output_filepath})
else:
print('Path given for output_filepath does not exist: %s'
% args.output_filepath)
if args.config_filepath is not None:
if args.config_filepath == 'reset':
D.pop('config_filepath')
else:
# check that it's a valid path
if os.path.exists(args.config_filepath):
D.update({'config_filepath': args.config_filepath})
else:
print('Path given for config_filepath does not exist: %s'
% args.config_filepath)
if args.popfile is not None:
if args.popfile == 'reset':
D.pop('popfile')
else:
# check that it's a valid path
if os.path.exists(args.popfile):
D.update({'popfile': args.popfile})
else:
print('Path given for population file does not exist: %s'
% args.popfile)
if args.trimfile is not None:
if args.trimfile == 'reset':
            D.pop('trimfile', None)
else:
# check that it's a valid path and that it's a shapefile
if os.path.exists(args.trimfile):
filename4, fileextension = os.path.splitext(args.trimfile)
if fileextension == '.shp':
D.update({'trimfile': args.trimfile})
else:
print('Ocean trimming file is not a shapefile: %s'
% args.trimfile)
else:
print('Path given for ocean trimming file does not exist: %s'
% args.trimfile)
if args.pdl_config is not None:
if args.pdl_config == 'reset':
D.pop('pdl_config')
else:
# check that it's a valid path
if os.path.exists(args.pdl_config):
D.update({'pdl_config': args.pdl_config})
else:
print('Path given for pdl config file does not exist: %s'
% args.pdl_config)
if args.log_filepath is not None:
if args.log_filepath == 'reset':
D.pop('log_filepath')
else:
# check that it's a valid path
if os.path.exists(args.log_filepath):
D.update({'log_filepath': args.log_filepath})
else:
print('Path given for log file does not exist: %s'
% args.log_filepath)
if args.dbfile is not None:
if args.dbfile == 'reset':
D.pop('dbfile')
else:
            # check that it's a valid path (file itself doesn't have to exist)
if os.path.exists(os.path.dirname(args.dbfile)):
D.update({'dbfile': args.dbfile})
else:
print('Path given for database file does not exist: %s'
% args.dbfile)
print('New default paths set.\n')
if D:
C = ConfigObj(D)
C.filename = filename
C.write()
list_default_paths()
else:
print('no defaults set because no paths were input\n')
def list_default_paths():
"""
Lists all default paths currently set.
"""
filename = os.path.join(os.path.expanduser('~'), '.gfail_defaults')
if os.path.exists(filename):
D = ConfigObj(filename)
print('Default paths currently set to:\n')
for key in D:
print('\t%s = %s' % (key, D[key]))
else:
print('No default paths currently set\n')
def reset_default_paths():
"""
Clear default path file
"""
filename = os.path.join(os.path.expanduser('~'), '.gfail_defaults')
if os.path.exists(filename):
os.remove(filename)
print('Default paths cleared\n')
else:
print('No default paths currently set\n')
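# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# The defaults written by set_default_paths() live in a plain key = value
# ConfigObj file in the user's home directory. A minimal way to read them back
# programmatically, assuming os and ConfigObj are imported at the top of this
# module as they are used above:
def _demo_read_gfail_defaults():
    defaults_file = os.path.join(os.path.expanduser('~'), '.gfail_defaults')
    if not os.path.exists(defaults_file):
        return {}
    return dict(ConfigObj(defaults_file))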
def get_bounds(shakefile, parameter='pga', threshold=2.0):
"""
Get the boundaries of the shakemap that include all areas with shaking
above the defined threshold.
Args:
shakefile (str): Path to shakemap file.
parameter (str): Either 'pga' or 'pgv'.
threshold (float): Minimum value of parameter of interest, in units
of %g for pga and cm/s for pgv. The default value of 2% g is based
on minimum pga threshold ever observed to have triggered landslides
by <NAME> Harp (2016).
Returns:
dict: A dictionary with keys 'xmin', 'xmax', 'ymin', and 'ymax' that
defines the boundaries in geographic coordinates.
"""
shakemap = ShakeGrid.load(shakefile, adjust='res')
if parameter == 'pga':
vals = shakemap.getLayer('pga')
elif parameter == 'pgv':
vals = shakemap.getLayer('pgv')
else:
raise Exception('parameter not valid')
xmin, xmax, ymin, ymax = vals.getBounds()
lons = np.linspace(xmin, xmax, vals.getGeoDict().nx)
lats = np.linspace(ymax, ymin, vals.getGeoDict().ny)
row, col = np.where(vals.getData() > float(threshold))
lonmin = lons[col].min()
lonmax = lons[col].max()
latmin = lats[row].min()
latmax = lats[row].max()
# dummy fillers, only really care about bounds
boundaries1 = {'dx': 100, 'dy': 100., 'nx': 100., 'ny': 100}
if xmin < lonmin:
boundaries1['xmin'] = lonmin
else:
boundaries1['xmin'] = xmin
if xmax > lonmax:
boundaries1['xmax'] = lonmax
else:
boundaries1['xmax'] = xmax
if ymin < latmin:
boundaries1['ymin'] = latmin
else:
boundaries1['ymin'] = ymin
if ymax > latmax:
boundaries1['ymax'] = latmax
else:
boundaries1['ymax'] = ymax
return boundaries1
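# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# 'grid.xml' below is a hypothetical local ShakeMap grid file, not something
# shipped with this code. The sketch only shows how the dictionary returned by
# get_bounds() is typically consumed.
def _demo_get_bounds(shakefile='grid.xml'):
    bounds = get_bounds(shakefile, parameter='pga', threshold=2.0)
    print('Area with pga above 2%%g: lon [%.3f, %.3f], lat [%.3f, %.3f]'
          % (bounds['xmin'], bounds['xmax'], bounds['ymin'], bounds['ymax']))
    return bounds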
def check_input_extents(config, shakefile=None, bounds=None):
"""Make sure all input files exist and cover the extent desired
Args:
config: configObj of a single model
shakefile: path to ShakeMap grid.xml file (used for bounds). If not
provided, bounds must be provided
bounds: dictionary of bounds with keys: 'xmin', 'xmax', 'ymin', 'ymax'
Returns:
tuple containing:
notcovered: list of files that do not cover the entire area
defined by bounds or shakefile
newbounds: new dictionary of bounds of subarea of original
bounds or shakefile extent that is covered by all input files
"""
if shakefile is None and bounds is None:
raise Exception('Must define either a shakemap file or bounds')
modelname = config.keys()[0]
# Make dummy geodict to use
if bounds is None:
evdict = ShakeGrid.getFileGeoDict(shakefile)
else:
evdict = GeoDict.createDictFromBox(
bounds['xmin'], bounds['xmax'],
bounds['ymin'], bounds['ymax'],
0.00001, 0.00001, inside=False)
# Check extents of all input layers
notcovered = []
notcovgdicts = []
newbounds = None
for item, value in config[modelname]['layers'].items():
if 'file' in value.keys():
filelook = value['file']
if getFileType(filelook) == 'gmt':
tmpgd, _ = GMTGrid.getFileGeoDict(filelook)
else:
tmpgd, _ = GDALGrid.getFileGeoDict(filelook)
# See if tempgd contains evdict
contains = tmpgd.contains(evdict)
if not contains:
notcovered.append(filelook)
notcovgdicts.append(tmpgd)
# print(filelook)
if len(notcovered) > 0:
# Figure out what bounds COULD be run
xmins = [gd.xmin for gd in notcovgdicts]
xmaxs = [gd.xmax for gd in notcovgdicts]
ymins = [gd.ymin for gd in notcovgdicts]
ymaxs = [gd.ymax for gd in notcovgdicts]
# Set in by a buffer of 0.05 degrees because mapio doesn't like
# when bounds are exactly the same for getboundswithin
newbounds = dict(xmin=evdict.xmin + 0.05,
xmax=evdict.xmax - 0.05,
ymin=evdict.ymin + 0.05,
ymax=evdict.ymax - 0.05)
# Which one is the problem?
        if evdict.xmin < np.max(xmins):
import numpy as np
import matplotlib.pyplot as plt
import os
import pydicom as pyd
from glob import glob
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
import bisect
import random
import math
from Augmentor import *
def import_dicom_data(path):
data_path = path + '/images/'
annot_path = path + '/labels/'
data_list = glob(data_path + '*.dcm')
annot_list = glob(annot_path + '*.dcm')
N = len(data_list)
data = []
annot = []
annot_frames = np.zeros((N))
print('Data Image Resolutions')
for i in range(N):
x = pyd.read_file(data_list[i]).pixel_array
        x = x[:len(x) // 2]
y = pyd.read_file(annot_list[i]).pixel_array
        y = y[:len(y) // 2]
n_frame = 0
for j in range(y.shape[0]):
if np.where(y[j] == 1)[0].size > 0:
n_frame += 1
annot_frames[i] = n_frame
print(x.shape, n_frame)
data.append(x)
annot.append(y)
return data, annot
def zeropad(data, annot, h_max, w_max):
# If the data is a list of images of different resolutions
# useful in testing
if isinstance(data, list):
n = len(data)
data_pad = np.zeros((n, h_max, w_max))
annot_pad = np.zeros((n, h_max, w_max))
for i in range(n):
pad_l1 = (h_max - data[i].shape[0]) // 2
pad_l2 = (h_max - data[i].shape[0]) - (h_max - data[i].shape[0]) // 2
pad_h1 = (w_max - data[i].shape[1]) // 2
pad_h2 = (w_max - data[i].shape[1]) - (w_max - data[i].shape[1]) // 2
data_pad[i] = np.pad(data[i], ((pad_l1, pad_l2), (pad_h1, pad_h2)), 'constant',
constant_values=((0, 0), (0, 0)))
annot_pad[i] = np.pad(annot[i], ((pad_l1, pad_l2), (pad_h1, pad_h2)), 'constant',
constant_values=((0, 0), (0, 0)))
# If data is a numpy array with images of same resolution
else:
pad_l1 = (h_max - data.shape[1]) // 2
pad_l2 = (h_max - data.shape[1]) - (h_max - data.shape[1]) // 2
pad_h1 = (w_max - data.shape[2]) // 2
pad_h2 = (w_max - data.shape[2]) - (w_max - data.shape[2]) // 2
data_pad = np.pad(data, ((0, 0), (pad_l1, pad_l2), (pad_h1, pad_h2)), 'constant',
constant_values=((0, 0), (0, 0), (0, 0)))
annot_pad = np.pad(annot, ((0, 0), (pad_l1, pad_l2), (pad_h1, pad_h2)), 'constant',
constant_values=((0, 0), (0, 0), (0, 0)))
return data_pad, annot_pad
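# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Shape check for zeropad() with made-up arrays: one call with a list of
# differently sized frames (the "testing" branch), one with a stacked array of
# equally sized frames.
def _demo_zeropad():
    frames = [np.ones((4, 6)), np.ones((8, 5))]
    masks = [np.zeros((4, 6)), np.zeros((8, 5))]
    data_pad, annot_pad = zeropad(frames, masks, h_max=16, w_max=16)
    assert data_pad.shape == (2, 16, 16) and annot_pad.shape == (2, 16, 16)
    stack = np.ones((3, 10, 12))
    data_pad2, _ = zeropad(stack, np.zeros_like(stack), h_max=16, w_max=16)
    assert data_pad2.shape == (3, 16, 16)
    return data_pad, data_pad2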
def data_augment(imgs, lb):
p = Pipeline()
p.rotate(probability=0.7, max_left_rotation=10, max_right_rotation=10)
imgs_temp, lb_temp = np.zeros(imgs.shape), np.zeros(imgs.shape)
for i in range(imgs.shape[0]):
pil_images = p.sample_with_array(imgs[i], ground_truth=lb[i], mode='L')
        imgs_temp[i], lb_temp[i] = np.asarray(pil_images[0]), np.asarray(pil_images[1])
import torch
from torch.autograd import Function
from torch.nn import Module
from constrained_sparsemax import constrained_sparsemax
import numpy as np
import pdb
np.set_printoptions(threshold=np.inf)
def project_onto_simplex(a, radius=1.0):
'''Project point a to the probability simplex.
Returns the projected point x and the residual value.'''
x0 = a.copy()
d = len(x0);
ind_sort = np.argsort(-x0)
y0 = x0[ind_sort]
ycum = np.cumsum(y0)
val = 1.0/np.arange(1,d+1) * (ycum - radius)
ind = np.nonzero(y0 > val)[0]
rho = ind[-1]
tau = val[rho]
y = y0 - tau
ind = np.nonzero(y < 0)
y[ind] = 0
x = x0.copy()
x[ind_sort] = y
return x, tau, .5*np.dot(x-a, x-a)
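# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Quick check of project_onto_simplex() on a made-up vector: the projection
# should be nonnegative and sum to the simplex radius (1.0 by default).
def _demo_project_onto_simplex():
    a = np.array([0.5, 1.2, -0.3, 0.1])
    x, tau, dist = project_onto_simplex(a)
    assert np.all(x >= 0.0)
    assert abs(x.sum() - 1.0) < 1e-9
    return x, tau, dist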
def constrained_softmax(z, u):
assert round(np.sum(u), 5) >= 1.0, pdb.set_trace()
assert (u>=0).all(), "Invalid: u[i]<0 for some i"
p = np.zeros_like(z)
active = np.ones_like(z)
nz = np.nonzero(u)[0]
z = z[nz]
u = u[nz]
active[nz] = 0.
z -= np.max(z)
e_z = np.exp(z)
Z = e_z.sum()
# if Z==0:
# return p, active, s
ind = np.argsort(-e_z / u)
s = 0.
for i in ind:
# Temporary fix for underflow in Z
if round(Z, 12) == 0.0: Z = 0.000001
val = e_z[i] * (1-s) / Z
if val > u[i]:
val = u[i]
Z -= e_z[i]
s += val
active[nz[i]] = 1.
p[nz[i]] = val
#if np.any(np.isnan(p)):
# import pdb; pdb.set_trace()
return p, active, s
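# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Small sanity check of constrained_softmax() with made-up scores z and upper
# bounds u (sum(u) >= 1, as required by the assert above). Only the defining
# constraints are checked: entries stay nonnegative and below their bounds.
def _demo_constrained_softmax():
    z = np.array([2.0, 1.0, 0.5, -1.0])
    u = np.array([0.3, 0.6, 0.5, 0.4])  # sums to 1.8 >= 1
    p, active, s = constrained_softmax(z.copy(), u.copy())
    assert np.all(p >= 0.0)
    assert np.all(p <= u + 1e-9)
    return p, active, s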
class SoftmaxFunction(Function):
def forward(self, input):
e_z = input.exp()
Z = e_z.sum(1)
output = e_z / Z.expand_as(e_z)
self.save_for_backward(input, output)
return output
def backward(self, grad_output):
input, output = self.saved_tensors
avg = (grad_output * output).sum(1)
grad_input = output * (grad_output - avg.expand_as(grad_output))
return grad_input
class Softmax(Module):
def forward(self, input):
return SoftmaxFunction()(input)
class SparsemaxFunction(Function):
def forward(self, input):
# TODO: Make an implementation directly with torch tensors,
# not requiring numpy.
# Example:
# z_sorted, ind_sort = (-input).sort(dim=1, descending=True)
# z_cum = z_sorted.cumsum(dim=1)
# r = torch.arange(1, 1+z_sorted.size(1))
# if input.is_cuda():
# r = r.cuda()
# val = 1.0 / r.expand_as(z_cum) * (z_cum - 1.)
# ...
np_input = input.cpu().numpy()
probs = np.zeros_like(np_input)
        for i in range(np_input.shape[0]):
probs[i,:], tau, _ = project_onto_simplex(np_input[i,:])
output = torch.from_numpy(probs)
if input.is_cuda:
output = output.cuda()
self.save_for_backward(input, output)
return output
def backward(self, grad_output):
input, output = self.saved_tensors
probs = output.cpu().numpy()
supp = np.array(probs > 0., dtype=probs.dtype)
np_grad_output = grad_output.cpu().numpy()
avg = np.sum(np_grad_output * supp, 1) / np.sum(supp, 1)
np_grad_input = supp * (np_grad_output - np.tile(avg[:,None],
[1, supp.shape[1]]))
grad_input = torch.from_numpy(np_grad_input)
if grad_output.is_cuda:
grad_input = grad_input.cuda()
return grad_input
class Sparsemax(Module):
def forward(self, input):
return SparsemaxFunction()(input)
class ConstrainedSoftmaxFunction(Function):
def forward(self, input1, input2):
z = input1.cpu().numpy()
u = input2.cpu().numpy()
probs = np.zeros_like(z)
active = np.zeros_like(z)
s = np.zeros_like(z[:,0])
        for i in range(z.shape[0]):
probs[i,:], active[i,:], s[i] = constrained_softmax(z[i], u[i])
probs = torch.from_numpy(probs)
active = torch.from_numpy(active)
s = torch.from_numpy(s)
if input1.is_cuda:
probs = probs.cuda()
active = active.cuda()
s = s.cuda()
self.save_for_backward(probs)
self.saved_intermediate = active, s # Not sure this is safe.
return probs
#z = input1
#u = input2
#e_z = z.exp()
#Z = e_z.sum(1)
#probs = e_z / Z.expand_as(e_z)
#active = (probs > u).type(probs.type())
#s = (active * u).sum(1)
#Z = ((1. - active) * e_z).sum(1) / (1-s)
#probs = active * u + (1. - active) * (e_z / Z.expand_as(z))
#output = probs
#self.save_for_backward(output)
#self.saved_intermediate = active, s # Not sure this is safe.
#return output
def backward(self, grad_output):
output, = self.saved_tensors
active, s = self.saved_intermediate
probs = output
m = ((1. - active) * probs * grad_output).sum(1) / (1. - s)
m = m.squeeze(-1) # This is needed for back-compatibility with pytorch 0.1.x.
# If all are active, then sum(u) = 1, s = 1, p = u, so we need to do
# the following to avoid nans.
ind = active.sum(1) == active.size(1)
m[ind] = 0.
grad_z = (1. - active) * probs * \
(grad_output - m.unsqueeze(1).expand_as(active))
grad_u = active * (grad_output - m.unsqueeze(1).expand_as(active))
grad_input1 = grad_z
grad_input2 = grad_u
#if np.any(np.isnan(grad_z.cpu().numpy())):
# import pdb; pdb.set_trace()
#if np.any(np.isnan(grad_u.cpu().numpy())):
# import pdb; pdb.set_trace()
return grad_input1, grad_input2
class ConstrainedSoftmax(Module):
def forward(self, input1, input2):
return ConstrainedSoftmaxFunction()(input1, input2)
class ConstrainedSparsemaxFunction(Function):
def forward(self, input1, input2):
z = input1.cpu().numpy()
u = input2.cpu().numpy()
#print("z:", z)
#print("u:", u)
probs = np.zeros_like(z)
regions = np.zeros_like(z)
        for i in range(z.shape[0]):
probs[i,:], regions[i,:], _, _ = constrained_sparsemax(z[i], u[i])
            assert np.all(probs[i, :] == probs[i, :])
import numpy as np
from abc import ABC, abstractmethod
from pathlib import Path
import subprocess
import numpy.ma as ma
import scipy.constants as const
from multiprocessing import Pool
from scipy.interpolate import interp1d
from dans_pymodules import Vector2D
import matplotlib.pyplot as plt
# from scipy import meshgrid
from scipy.special import iv as bessel1
from scipy.optimize import root
# import pickle
# import scipy.constants as const
# import numpy as np
# import platform
# import matplotlib.pyplot as plt
# import gc
import datetime
import time
import copy
import os
import sys
import shutil
from matplotlib.patches import Arc as Arc
load_previous = False
# Check if we can connect to a display, if not disable all plotting and windowed stuff (like gmsh)
# TODO: This does not remotely cover all cases!
if "DISPLAY" in os.environ.keys():
x11disp = True
else:
x11disp = False
# --- Try importing BEMPP
HAVE_BEMPP = False
try:
import bempp.api
from bempp.api.shapes.shapes import __generate_grid_from_geo_string as generate_from_string
HAVE_BEMPP = True
except ImportError:
print("Couldn't import BEMPP, no meshing or BEM field calculation will be possible.")
bempp = None
generate_from_string = None
# --- Try importing mpi4py, if it fails, we fall back to single processor
try:
from mpi4py import MPI
COMM = MPI.COMM_WORLD
RANK = COMM.Get_rank()
SIZE = COMM.Get_size()
HOST = MPI.Get_processor_name()
print("Process {} of {} on host {} started!".format(RANK + 1, SIZE, HOST))
sys.stdout.flush()
except ImportError:
MPI = None
COMM = None
RANK = 0
SIZE = 1
import socket
HOST = socket.gethostname()
print("Could not import mpi4py, falling back to single core (and python multiprocessing in some instances)!")
# --- Try importing pythonocc-core
HAVE_OCC = False
try:
from OCC.Extend.DataExchange import read_stl_file
from OCC.Display.SimpleGui import init_display
from OCC.Core.BRepPrimAPI import BRepPrimAPI_MakeBox, BRepPrimAPI_MakeTorus, BRepPrimAPI_MakeSweep
from OCC.Core.BRepTools import breptools_Write
from OCC.Core.BRepBndLib import brepbndlib_Add
from OCC.Core.Bnd import Bnd_Box
from OCC.Core.gp import gp_Pnt, gp_Pnt2d
from OCC.Core.BRepClass3d import BRepClass3d_SolidClassifier
from OCC.Core.TopAbs import TopAbs_ON, TopAbs_OUT, TopAbs_IN
from OCC.Core.GeomAPI import GeomAPI_Interpolate, GeomAPI_PointsToBSpline
from OCC.Core.Geom import Geom_BSplineCurve
from OCC.Core.Geom2d import Geom2d_BSplineCurve
from OCC.Core.TColgp import TColgp_HArray1OfPnt, TColgp_Array1OfPnt
from OCC.Core.TColStd import TColStd_Array1OfInteger, TColStd_Array1OfReal
from OCC.Core.GeomAbs import GeomAbs_C1, GeomAbs_C2, GeomAbs_G1
from OCC.Core.Geom2dAPI import Geom2dAPI_Interpolate, Geom2dAPI_PointsToBSpline
from OCC.Core.TColgp import TColgp_HArray1OfPnt2d, TColgp_Array1OfPnt2d
from OCCUtils.Common import *
from py_electrodes import ElectrodeObject
HAVE_OCC = True
except ImportError:
ElectrodeObject = None
print("Something went wrong during OCC import. No CAD support possible!")
USE_MULTIPROC = True # In case we are not using mpi or only using 1 processor, fall back on multiprocessing
GMSH_EXE = "/home/daniel/src/gmsh-4.0.6-Linux64/bin/gmsh"
# GMSH_EXE = "E:/gmsh4/gmsh.exe"
HAVE_TEMP_FOLDER = False
np.set_printoptions(threshold=10000)
HAVE_GMSH = True
# Quick test if gmsh path is correct
if not Path(GMSH_EXE).is_file():
print("Gmsh path seems to be wrong! No meshing will be possible!")
HAVE_GMSH = False
# For now, everything involving the pymodules with be done on master proc (RANK 0)
if RANK == 0:
from dans_pymodules import *
colors = MyColors()
else:
colors = None
decimals = 12
__author__ = "<NAME>, <NAME>"
__doc__ = """Calculate RFQ fields from loaded cell parameters"""
# Initialize some global constants
amu = const.value("atomic mass constant energy equivalent in MeV")
echarge = const.value("elementary charge")
clight = const.value("speed of light in vacuum")
# Define the axis directions and vane rotations:
X = 0
Y = 1
Z = 2
XYZ = range(3)
AXES = {"X": 0, "Y": 1, "Z": 2}
rot_map = {"yp": 0.0,
"ym": 180.0,
"xp": 270.0,
"xm": 90.0}
class Polygon2D(object):
"""
Simple class to handle polygon operations such as point in polygon or
orientation of rotation (cw or ccw), area, etc.
"""
def add_point(self, p=None):
"""
Append a point to the polygon
"""
if p is not None:
if isinstance(p, tuple) and len(p) == 2:
self.poly.append(p)
else:
print("Error in add_point of Polygon: p is not a 2-tuple!")
else:
print("Error in add_point of Polygon: No p given!")
return 0
def add_polygon(self, poly=None):
"""
Append a polygon object to the end of this polygon
"""
if poly is not None:
if isinstance(poly, Polygon2D):
self.poly.extend(poly.poly)
# if isinstance(poly.poly, list) and len(poly.poly) > 0:
#
# if isinstance(poly.poly[0], tuple) and len(poly.poly[0]) == 2:
# self.poly.extend(poly.poly)
return 0
def area(self):
"""
Calculates the area of the polygon. only works if there are no crossings
Taken from http://paulbourke.net, algorithm written by <NAME>, 1998
If area is positive -> polygon is given clockwise
If area is negative -> polygon is given counter clockwise
"""
area = 0
poly = self.poly
npts = len(poly)
j = npts - 1
i = 0
for _ in poly:
p1 = poly[i]
p2 = poly[j]
area += (p1[0] * p2[1])
area -= p1[1] * p2[0]
j = i
i += 1
area /= 2
return area
def centroid(self):
"""
Calculate the centroid of the polygon
Taken from http://paulbourke.net, algorithm written by <NAME>, 1998
"""
poly = self.poly
npts = len(poly)
x = 0
y = 0
j = npts - 1
i = 0
for _ in poly:
p1 = poly[i]
p2 = poly[j]
f = p1[0] * p2[1] - p2[0] * p1[1]
x += (p1[0] + p2[0]) * f
y += (p1[1] + p2[1]) * f
j = i
i += 1
f = self.area() * 6
return x / f, y / f
def clockwise(self):
"""
Returns True if the polygon points are ordered clockwise
If area is positive -> polygon is given clockwise
If area is negative -> polygon is given counter clockwise
"""
if self.area() > 0:
return True
else:
return False
def closed(self):
"""
Checks whether the polygon is closed (i.e first point == last point)
"""
if self.poly[0] == self.poly[-1]:
return True
else:
return False
def nvertices(self):
"""
Returns the number of vertices in the polygon
"""
return len(self.poly)
def point_in_poly(self, p=None):
"""
Check if a point p (tuple of x,y) is inside the polygon
This is called the "ray casting method": If a ray cast from p crosses
the polygon an even number of times, it's outside, otherwise inside
From: http://www.ariel.com.au/a/python-point-int-poly.html
Note: Points directly on the edge or identical with a vertex are not
considered "inside" the polygon!
"""
if p is None:
return None
poly = self.poly
x = p[0]
y = p[1]
n = len(poly)
inside = False
p1x, p1y = poly[0]
for i in range(n + 1):
p2x, p2y = poly[i % n]
if y > min(p1y, p2y):
if y <= max(p1y, p2y):
if x <= max(p1x, p2x):
if p1y != p2y:
xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x, p1y = p2x, p2y
return inside
def remove_last(self):
"""
Remove the last tuple in the ploygon
"""
self.poly.pop(-1)
return 0
def reverse(self):
"""
Reverses the ordering of the polygon (from cw to ccw or vice versa)
"""
temp_poly = []
nv = self.nvertices()
for i in range(self.nvertices() - 1, -1, -1):
temp_poly.append(self.poly[i])
self.poly = temp_poly
return temp_poly
def rotate(self, index):
"""
rotates the polygon, so that the point with index 'index' before now has
index 0
"""
if index > self.nvertices() - 1:
return 1
for i in range(index):
self.poly.append(self.poly.pop(0))
return 0
def __init__(self, poly=None):
"""
construct a polygon object
If poly is not specified, an empty polygon is created
if poly is specified, it has to be a list of 2-tuples!
"""
self.poly = []
if poly is not None:
if isinstance(poly, list) and len(poly) > 0:
if isinstance(poly[0], tuple) and len(poly[0]) == 2:
self.poly = poly
def __getitem__(self, index):
return self.poly[index]
def __setitem__(self, index, value):
if isinstance(value, tuple) and len(value) == 2:
self.poly[index] = value
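# --- Hedged usage sketch (added for illustration, not part of the original class) ---
# Self-check of Polygon2D on a unit square given counter-clockwise: area() is
# -1 (negative means ccw by the convention documented above), the centroid is
# (0.5, 0.5), and point_in_poly() separates interior from exterior points.
def _demo_polygon2d():
    square = Polygon2D(poly=[(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)])
    assert abs(square.area() + 1.0) < 1e-12
    cx, cy = square.centroid()
    assert abs(cx - 0.5) < 1e-12 and abs(cy - 0.5) < 1e-12
    assert square.point_in_poly((0.25, 0.25))
    assert not square.point_in_poly((2.0, 2.0))
    return square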
class PyRFQCell(object):
def __init__(self,
cell_type,
prev_cell=None,
next_cell=None,
debug=False,
**kwargs):
"""
:param cell_type:
STA: Start cell without length (necessary at beginning of RMS if there are no previous cells)
RMS: Radial Matching Section.
NCS: Normal Cell. A regular RFQ cell
TCS: Transition Cell.
DCS: Drift Cell. No modulation.
TRC: Trapezoidal cell (experimental, for re-bunching only!).
:param prev_cell:
:param next_cell:
:param debug:
Keyword Arguments (mostly from Parmteq Output File):
V: Intervane voltage in V
Wsyn: Energy of the synchronous particle in MeV
Sig0T: Transverse zero-current phase advance in degrees per period
Sig0L: Longitudinal zero-current phase advance in degrees per period
A10: Acceleration term [first theta-independent term in expansion]
Phi: Synchronous phase in degrees
a: Minimum radial aperture in m
m: Modulation (dimensionless)
B: Focusing parameter (dimensionless) B = q V lambda^2/(m c^2 r0^2)
L: Cell length in cm
A0: Quadrupole term [first z-independent term in expansion]
RFdef: RF defocusing term
Oct: Octupole term
A1: Duodecapole term [second z-independent term in expansion]
"""
assert cell_type in ["start", "rms", "regular",
"transition", "transition_auto", "drift", "trapezoidal"], \
"cell_type not recognized!"
self._type = cell_type
self._params = {"voltage": None,
"Wsyn": None,
"Sig0T": None,
"Sig0L": None,
"A10": None,
"Phi": None,
"a": None,
"m": None,
"B": None,
"L": None,
"A0": None,
"RFdef": None,
"Oct": None,
"A1": None,
"flip_z": False,
"shift_cell_no": False,
"fillet_radius": None
}
self._prev_cell = prev_cell
self._next_cell = next_cell
self._debug = debug
for key, item in self._params.items():
if key in kwargs.keys():
self._params[key] = kwargs[key]
if self.initialize() != 0:
print("Cell failed self-check! Aborting.")
exit(1)
self._profile_itp = None # Interpolation of the cell profile
def __str__(self):
return "Type: '{}', Aperture: {:.6f}, Modulation: {:.4f}, " \
"Length: {:.6f}, flip: {}, shift: {}".format(self._type,
self._params["a"],
self._params["m"],
self._params["L"],
self._params["flip_z"],
self._params["shift_cell_no"])
@property
def length(self):
return self._params["L"]
@property
def aperture(self):
return self._params["a"]
@property
def avg_radius(self):
return 0.5 * (self._params["a"] + self._params["m"] * self._params["a"])
@property
def cell_type(self):
return self._type
@property
def modulation(self):
return self._params["m"]
@property
def prev_cell(self):
return self._prev_cell
@property
def next_cell(self):
return self._next_cell
def calculate_transition_cell_length(self):
le = self._params["L"]
m = self._params["m"]
a = self._params["a"]
r0 = self.avg_radius
k = np.pi / np.sqrt(3.0) / le
def eta(kk):
return bessel1(0.0, kk * r0) / (3.0 * bessel1(0.0, 3.0 * kk * r0))
def func(kk):
return (bessel1(0.0, kk * m * a) + eta(kk) * bessel1(0.0, 3.0 * kk * m * a)) / \
(bessel1(0.0, kk * a) + eta(kk) * bessel1(0.0, 3.0 * kk * a)) \
+ ((m * a / r0) ** 2.0 - 1.0) / ((a / r0) ** 2.0 - 1.0)
k = root(func, k).x[0]
tcs_length = np.pi / 2.0 / k
print("Transition cell has length {} which is {} * cell length, ".format(tcs_length, tcs_length / le), end="")
assert tcs_length <= le, "Numerical determination of transition cell length " \
"yielded value larger than cell length parameter!"
if tcs_length > le:
print("the remainder will be filled with a drift.")
return tcs_length
def initialize(self):
# TODO: Refactor this maybe? seems overly complicated...
# Here we check the different cell types for consistency and minimum necessary parameters
if self._type in ["transition", "transition_auto"]:
assert self.prev_cell is not None, "A transition cell needs a preceeeding cell."
assert self.prev_cell.cell_type == "regular", "Currently a transition cell must follow a regular cell."
# Aperture:
assert self._params["a"] is not None, "No aperture given for {} cell".format(self._type)
if self._params["a"] == 'auto':
assert self._type in ["drift", "trapezoidal", "transition", "transition_auto"], \
"Unsupported cell type '{}' for auto-aperture".format(self._type)
assert self.prev_cell is not None, "Need a preceeding cell for auto aperture!"
if self.prev_cell.cell_type in ["transition", "transition_auto"]:
self._params["a"] = self.prev_cell.avg_radius
else:
self._params["a"] = self.prev_cell.aperture
self._params["a"] = np.round(self._params["a"], decimals)
# Modulation:
if self._type in ["start", "rms", "drift"]:
self._params["m"] = 1.0
assert self._params["m"] is not None, "No modulation given for {} cell".format(self._type)
if self._params["m"] == 'auto':
assert self._type in ["transition", "transition_auto"], \
"Only transition cell can have 'auto' modulation at the moment!"
self._params["m"] = self.prev_cell.modulation
self._params["m"] = np.round(self._params["m"], decimals)
# Length:
if self._type == "start":
self._params["L"] = 0.0
assert self._params["L"] is not None, "No length given for {} cell".format(self._type)
if self._params["L"] == "auto":
assert self._type == "transition_auto", "Only transition_auto cells allow auto-length!"
self._params["L"] = self.prev_cell.length # use preceeding cell length L for calculation of L'
self._params["L"] = self.calculate_transition_cell_length()
self._params["L"] = np.round(self._params["L"], decimals)
if self._type == "trapezoidal":
assert self._params["fillet_radius"] is not None, "For 'TRC' cell a fillet radius must be given!"
return 0
def set_prev_cell(self, prev_cell):
assert isinstance(prev_cell, PyRFQCell), "You are trying to set a PyRFQCell with a non-cell object!"
self._prev_cell = prev_cell
def set_next_cell(self, next_cell):
assert isinstance(next_cell, PyRFQCell), "You are trying to set a PyRFQCell with a non-cell object!"
self._next_cell = next_cell
def calculate_profile_rms(self, vane_type, cell_no):
# Assemble RMS section by finding adjacent RMS cells and get their apertures
cc = self
pc = cc.prev_cell
rms_cells = [cc]
shift = 0.0
while pc is not None and pc.cell_type == "rms":
rms_cells = [pc] + rms_cells
shift += pc.length
cc = pc
pc = cc.prev_cell
cc = self
nc = cc._next_cell
while nc is not None and nc.cell_type == "rms":
rms_cells = rms_cells + [nc]
cc = nc
nc = cc.next_cell
# Check for starting cell
assert rms_cells[0].prev_cell is not None, "Cannot assemble RMS section without a preceding cell! " \
"At the beginning ofthe RFQ consider using a start (STA) cell."
a = [0.5 * rms_cells[0].prev_cell.aperture * (1.0 + rms_cells[0].prev_cell.modulation)]
z = [0.0]
for _cell in rms_cells:
a.append(_cell.aperture)
z.append(z[-1] + _cell.length)
self._profile_itp = interp1d(np.array(z) - shift, np.array(a), kind='cubic')
return 0
def calculate_profile_transition(self, vane_type, cell_no):
le = self._params["L"]
m = self._params["m"]
a = self._params["a"]
k = np.pi / np.sqrt(3.0) / le # Initial guess
r0 = 0.5 * (a + m * a)
if self.cell_type == "transition_auto":
tcl = le
else:
tcl = self.calculate_transition_cell_length()
z = np.linspace(0.0, le, 200)
idx = np.where(z <= tcl)
vane = np.ones(z.shape) * r0
print("Average radius of transition cell (a + ma) / 2 = {}".format(r0))
def eta(kk):
return bessel1(0.0, kk * r0) / (3.0 * bessel1(0.0, 3.0 * kk * r0))
def a10(kk):
return ((m * a / r0) ** 2.0 - 1.0) / (
bessel1(0.0, kk * m * a) + eta(kk) * bessel1(0.0, 3.0 * kk * m * a))
def a30(kk):
return eta(kk) * a10(kk)
def func(kk):
return (bessel1(0.0, kk * m * a) + eta(kk) * bessel1(0.0, 3.0 * kk * m * a)) / \
(bessel1(0.0, kk * a) + eta(kk) * bessel1(0.0, 3.0 * kk * a)) \
+ ((m * a / r0) ** 2.0 - 1.0) / ((a / r0) ** 2.0 - 1.0)
k = root(func, k).x[0]
if self._params["shift_cell_no"]:
sign = (-1.0) ** (cell_no + 1)
else:
sign = (-1.0) ** cell_no
_vane = []
if "x" in vane_type:
def vane_x(xx):
return - (xx / r0) ** 2.0 \
+ sign * a10(k) * bessel1(0.0, k * xx) * np.cos(k * _z) \
+ sign * a30(k) * bessel1(0.0, 3.0 * k * xx) * np.cos(3.0 * k * _z) + 1.0
for _z in z[idx]:
_vane.append(root(vane_x, r0).x[0])
else:
def vane_y(yy):
return + (yy / r0) ** 2.0 \
+ sign * a10(k) * bessel1(0.0, k * yy) * np.cos(k * _z) \
+ sign * a30(k) * bessel1(0.0, 3.0 * k * yy) * np.cos(3.0 * k * _z) - 1.0
for _z in z[idx]:
_vane.append(root(vane_y, r0).x[0])
if self._params["flip_z"]:
_vane = _vane[::-1]
vane[np.where(z >= le - tcl)] = _vane
else:
vane[idx] = _vane
self._profile_itp = interp1d(z, vane, bounds_error=False, fill_value=0)
return 0
def calculate_profile_trapezoidal(self, vane_type, cell_no):
# TODO: This is a rough test of a trapezoidal cell: _/-\_
# TODO: tilted parts are as long as roof and start and end (cell_length/5)
fillet_radius = self._params["fillet_radius"] # m
def intersection(_p1, _v1, _p2, _v2):
s = (_v2[1] * (_p2[0] - _p1[0]) + _v2[0] * (_p1[1] - _p2[1])) / (_v1[0] * _v2[1] - _v1[1] * _v2[0])
return _p1 + s * _v1
def arc_to_poly(z1, r1, z2, r2, r_curv, invert):
"""
transform an arc into a polygon
"""
polygon = Polygon2D()
cur = 1
if invert:
cur = -1
dp = np.sqrt((z2 - z1) ** 2 + (r2 - r1) ** 2)
if r_curv < 0.5 * dp:
return None
dx = np.sqrt(abs((0.5 * dp) ** 2.0 - r_curv ** 2.0))
zc = (z1 + z2) * 0.5 - cur * dx * (r1 - r2) / dp
rc = (r1 + r2) * 0.5 + cur * dx * (z1 - z2) / dp
if round(z1 - zc, 8) == 0:
if r1 > rc:
p1 = 90
else:
p1 = 270
else:
                p1 = np.arctan((r1 - rc) / (z1 - zc))
import matplotlib.pylab as plt
import numpy as np
import seaborn as sns
def tanks(ts, liters, inL=6, uitL=4, uitR=3, uitR2=2, inR=1, cL=0.2, cR=0.1, kgL=0, kgR=20):
assert inL + inR == uitL + uitR, "Geen evenwicht! Meer in/uit dan in/uit"
    tank1 = np.zeros(ts + 1)
# coding: utf-8
# Utils for multi-stream features
import torch
from torch import nn
from torch.autograd import Variable
import numpy as np
from nnmnkwii.autograd import unit_variance_mlpg
from nnmnkwii import preprocessing as P
def recompute_delta_features(Y, Y_data_mean, Y_data_std,
windows,
stream_sizes=[180, 3, 1, 3],
has_dynamic_features=[True, True, False, True]):
start_indices = np.hstack(([0], np.cumsum(stream_sizes)[:-1]))
end_indices = np.cumsum(stream_sizes)
static_stream_sizes = get_static_stream_sizes(
stream_sizes, has_dynamic_features, len(windows))
for start_idx, end_idx, static_size, has_dynamic in zip(
start_indices, end_indices, static_stream_sizes, has_dynamic_features):
if has_dynamic:
y_static = Y[:, start_idx:start_idx + static_size]
Y[:, start_idx:end_idx] = P.delta_features(y_static, windows)
return Y
def select_streams(inputs, stream_sizes=[60, 1, 1, 1],
streams=[True, True, True, True]):
ret = []
start_indices = np.hstack(([0], np.cumsum(stream_sizes)[:-1]))
for start_idx, size, enabled in zip(
start_indices, stream_sizes, streams):
if not enabled:
continue
ret.append(inputs[:, :, start_idx:start_idx + size])
return torch.cat(ret, dim=-1)
def get_static_stream_sizes(stream_sizes, has_dynamic_features, num_windows):
"""Get static dimention for each feature stream.
"""
static_stream_sizes = np.array(stream_sizes)
static_stream_sizes[has_dynamic_features] = \
static_stream_sizes[has_dynamic_features] / num_windows
return static_stream_sizes
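# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# With the default streams used throughout this file (presumably mgc, lf0, vuv
# and bap, the first, second and fourth carrying delta features) and 3 windows,
# the static dimensions come out as 60, 1, 1 and 1.
def _demo_get_static_stream_sizes():
    sizes = get_static_stream_sizes(stream_sizes=[180, 3, 1, 3],
                                    has_dynamic_features=[True, True, False, True],
                                    num_windows=3)
    assert list(sizes) == [60, 1, 1, 1]
    return sizes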
def get_static_features(inputs, num_windows, stream_sizes=[180, 3, 1, 3],
has_dynamic_features=[True, True, False, True],
streams=[True, True, True, True]):
"""Get static features from static+dynamic features.
"""
_, _, D = inputs.size()
if stream_sizes is None or (len(stream_sizes) == 1 and has_dynamic_features[0]):
return inputs[:, :, :D // num_windows]
if len(stream_sizes) == 1 and not has_dynamic_features[0]:
return inputs
# Multi stream case
ret = []
start_indices = np.hstack(([0], np.cumsum(stream_sizes)[:-1]))
for start_idx, size, v, enabled in zip(
start_indices, stream_sizes, has_dynamic_features, streams):
if not enabled:
continue
if v:
static_features = inputs[:, :, start_idx:start_idx + size // num_windows]
else:
static_features = inputs[:, :, start_idx:start_idx + size]
ret.append(static_features)
return torch.cat(ret, dim=-1)
def multi_stream_mlpg(inputs, R,
stream_sizes=[180, 3, 1, 3],
has_dynamic_features=[True, True, False, True],
streams=[True, True, True, True]):
"""Split streams and do apply MLPG if stream has dynamic features.
"""
if R is None:
num_windows = 1
else:
        num_windows = R.size(1) // R.size(0)
B, T, D = inputs.size()
if D != sum(stream_sizes):
raise RuntimeError("You probably have specified wrong dimention params.")
# Straem indices for static+delta features
# [0, 180, 183, 184]
start_indices = np.hstack(([0], np.cumsum(stream_sizes)[:-1]))
# [180, 183, 184, 187]
end_indices = np.cumsum(stream_sizes)
# Stream sizes for static features
# [60, 1, 1, 1]
static_stream_sizes = get_static_stream_sizes(
stream_sizes, has_dynamic_features, num_windows)
# [0, 60, 61, 62]
static_stream_start_indices = np.hstack(
        ([0], np.cumsum(static_stream_sizes)[:-1]))
import numpy as np
from scipy.special import jv # Bessel Function of the first kind
from scipy.linalg import eig
from scipy.fftpack import fftn, ifftn, ifft
# import progressbar
from tqdm import tqdm
from scipy.ndimage import filters as fi
import math
# An implementation of the Optimally Oriented
# <NAME> and <NAME>, ``Three Dimensional Curvilinear
# Structure Detection using Optimally Oriented Flux'', ECCV 2008, pp.
# 368--382.
# <NAME> al., ``Dilated Divergence based Scale-Space
# Representation for Curve Analysis'', ECCV 2012, pp. 557--571.
# Author: <NAME>
def response(img, rsptype='oof', **kwargs):
eps = 1e-12
rsp = np.zeros(img.shape)
# bar = progressbar.ProgressBar(max_value=kwargs['radii'].size)
# bar.update(0)
W = np.zeros((img.shape[0], img.shape[1], img.shape[2], 3)) # Eigen values to save
V = np.zeros((img.shape[0], img.shape[1], img.shape[2], 3, 3)) # Eigen vectors to save
if rsptype == 'oof' :
rsptensor = ooftensor(img, kwargs['radii'], kwargs['memory_save'])
elif rsptype == 'bg':
rsptensor = bgtensor(img, kwargs['radii'], kwargs['rho'])
pbar = tqdm(total=len(kwargs['radii']))
for i, tensorfield in enumerate(rsptensor):
# Make the tensor from tensorfield
f11, f12, f13, f22, f23, f33 = tensorfield
tensor = np.stack((f11, f12, f13, f12, f22, f23, f13, f23, f33), axis=-1)
del f11
del f12
del f13
del f22
del f23
del f33
tensor = tensor.reshape(img.shape[0], img.shape[1], img.shape[2], 3, 3)
w, v = np.linalg.eigh(tensor)
del tensor
sume = w.sum(axis=-1)
nvox = img.shape[0] * img.shape[1] * img.shape[2]
sortidx = np.argsort(np.abs(w), axis=-1)
sortidx = sortidx.reshape((nvox, 3))
# Sort eigenvalues according to their abs
w = w.reshape((nvox, 3))
for j, (idx, value) in enumerate(zip(sortidx, w)):
w[j,:] = value[idx]
w = w.reshape(img.shape[0], img.shape[1], img.shape[2], 3)
# Sort eigenvectors according to their abs
v = v.reshape((nvox, 3, 3))
for j, (idx, vec) in enumerate(zip(sortidx, v)):
v[j,:,:] = vec[:, idx]
del sortidx
v = v.reshape(img.shape[0], img.shape[1], img.shape[2], 3, 3)
mine = w[:,:,:, 0]
mide = w[:,:,:, 1]
maxe = w[:,:,:, 2]
if rsptype == 'oof':
feat = maxe
elif rsptype == 'bg':
feat = -mide / maxe * (mide + maxe) # Medialness measure response
cond = sume >= 0
feat[cond] = 0 # Filter the non-anisotropic voxels
del mine
del maxe
del mide
del sume
cond = np.abs(feat) > np.abs(rsp)
W[cond, :] = w[cond, :]
V[cond, :, :] = v[cond, :, :]
rsp[cond] = feat[cond]
del v
del w
del tensorfield
del feat
del cond
pbar.update(1)
return rsp, V, W
def bgkern3(kerlen, mu=0, sigma=3., rho=0.2):
'''
Generate the bi-gaussian kernel
'''
sigma_b = rho * sigma
k = rho ** 2
kr = (kerlen - 1) / 2
X, Y, Z = np.meshgrid(np.arange(-kr, kr+1),
np.arange(-kr, kr+1),
np.arange(-kr, kr+1))
dist = np.linalg.norm(np.stack((X, Y, Z)), axis=0)
G = gkern3(dist, mu, sigma) # Normal Gaussian with mean at origin
Gb = gkern3(dist, sigma-sigma_b, sigma_b)
c0 = k * Gb[0, 0, math.floor(sigma_b)] - G[0, 0, math.floor(sigma)]
c1 = G[0, 0, math.floor(sigma)] - k * Gb[0, 0, math.floor(sigma_b)] + c0
G += c0
Gb = k * Gb + c1 # Inverse Gaussian with phase shift
# Replace the centre of Gb with G
central_region = dist <= sigma
del dist
X = (X[central_region] + kr).astype('int')
Y = (Y[central_region] + kr).astype('int')
Z = (Z[central_region] + kr).astype('int')
Gb[X, Y, Z] = G[X, Y, Z]
return Gb
def eigh(a, UPLO='L'):
# I Borrowed from Dipy
"""Iterate over `np.linalg.eigh` if it doesn't support vectorized operation
Parameters
----------
a : array_like (..., M, M)
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray (..., M)
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : ndarray (..., M, M)
The column ``v[..., :, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[..., i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
np.linalg.eigh
"""
a = np.asarray(a)
    # Guard for very old numpy without vectorized eigh (replaces the undefined NUMPY_LESS_1_8 flag)
    numpy_less_1_8 = tuple(int(v) for v in np.__version__.split('.')[:2]) < (1, 8)
    if a.ndim > 2 and numpy_less_1_8:
shape = a.shape[:-2]
a = a.reshape(-1, a.shape[-2], a.shape[-1])
evals = np.empty((a.shape[0], a.shape[1]))
evecs = np.empty((a.shape[0], a.shape[1], a.shape[1]))
for i, item in enumerate(a):
evals[i], evecs[i] = np.linalg.eigh(item, UPLO)
return (evals.reshape(shape + (a.shape[1], )),
evecs.reshape(shape + (a.shape[1], a.shape[1])))
return np.linalg.eigh(a, UPLO)
def gkern3(dist, mu=0., sigma=3.):
'''
Make 3D gaussian kernel
'''
# Make a dirac spherical function
return np.exp(-0.5 * (((dist - mu) / sigma)**2)) / (sigma * np.sqrt(2. * np.pi))
def hessian3(x):
"""
Calculate the hessian matrix with finite differences
Parameters:
- x : ndarray
Returns:
an array of shape (x.dim, x.ndim) + x.shape
where the array[i, j, ...] corresponds to the second derivative x_ij
"""
x_grad = np.gradient(x)
tmpgrad = np.gradient(x_grad[0])
f11 = tmpgrad[0]
f12 = tmpgrad[1]
f13 = tmpgrad[2]
tmpgrad = np.gradient(x_grad[1])
f22 = tmpgrad[1]
f23 = tmpgrad[2]
tmpgrad = np.gradient(x_grad[2])
f33 = tmpgrad[2]
return [f11, f12, f13, f22, f23, f33]
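# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# hessian3() applies np.gradient twice, so on the quadratic field f = x**2 the
# second derivative along the first axis should be close to 2 away from the
# borders (np.gradient assumes unit grid spacing).
def _demo_hessian3():
    xg, _, _ = np.meshgrid(np.arange(16.), np.arange(16.), np.arange(16.),
                           indexing='ij')
    f11, f12, f13, f22, f23, f33 = hessian3(xg ** 2)
    assert np.allclose(f11[2:-2, 2:-2, 2:-2], 2.0)
    return f11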
def bgtensor(img, lsigma, rho=0.2):
eps = 1e-12
fimg = fftn(img, overwrite_x=True)
for s in lsigma:
jvbuffer = bgkern3(kerlen=math.ceil(s)*6+1, sigma=s, rho=rho)
jvbuffer = fftn(jvbuffer, shape=fimg.shape, overwrite_x=True) * fimg
fimg = ifftn(jvbuffer, overwrite_x=True)
yield hessian3(np.real(fimg))
def eigval33(tensorfield):
''' Calculate the eigenvalues of massive 3x3 real symmetric matrices. '''
a11, a12, a13, a22, a23, a33 = tensorfield
eps = 1e-50
b = a11 + eps
d = a22 + eps
j = a33 + eps
c = - a12**2. - a13**2. - a23**2. + b * d + d * j + j* b
    d = - b * d * j + a23**2. * b + a12**2. * j + a13**2. * d - 2. * a13 * a12 * a23  # equals -det(A)
b = - a11 - a22 - a33 - 3. * eps
d = d + (2. * b**3. - 9. * b * c) / 27
c = b**2. / 3. - c
c = c**3.
c = c / 27
c[c < 0] = 0
c = np.sqrt(c)
j = c ** (1./3.)
c = c + (c==0).astype('float')
d = -d /2. /c
d[d>1] = 1
    d[d < -1] = -1
d = np.real(np.arccos(d) / 3.)
c = j * np.cos(d)
d = j * np.sqrt(3.) * np.sin(d)
b = -b / 3.
j = -c - d + b
d = -c + d + b
b = 2. * c + b
return b, j, d
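# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Cross-check of eigval33() against np.linalg.eigvalsh on a small random field
# of symmetric 3x3 tensors. The three returned arrays are compared as sorted
# triples, since eigval33 does not guarantee an ordering.
def _demo_eigval33():
    rng = np.random.RandomState(0)
    shape = (4, 4, 4)
    a11, a22, a33 = rng.rand(*shape), rng.rand(*shape), rng.rand(*shape)
    a12, a13, a23 = rng.rand(*shape), rng.rand(*shape), rng.rand(*shape)
    b, j, d = eigval33([a11, a12, a13, a22, a23, a33])
    ours = np.sort(np.stack((b, j, d), axis=-1), axis=-1)
    mats = np.stack((a11, a12, a13, a12, a22, a23, a13, a23, a33),
                    axis=-1).reshape(shape + (3, 3))
    ref = np.linalg.eigvalsh(mats)
    assert np.allclose(ours, ref, atol=1e-5)
    return ours, ref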
def oofftkernel(kernel_radius, r, sigma=1, ntype=1):
eps = 1e-12
normalisation = 4/3 * np.pi * r**3 / (jv(1.5, 2*np.pi*r*eps) / eps ** (3/2)) / r**2 * \
(r / np.sqrt(2.*r*sigma - sigma**2)) ** ntype
    jvbuffer = normalisation * np.exp(-2 * sigma**2 * np.pi**2 * kernel_radius**2) / kernel_radius**1.5
return (np.sin(2 * np.pi * r * kernel_radius) / (2 * np.pi * r * kernel_radius) - np.cos(2 * np.pi * r * kernel_radius)) * \
jvbuffer * np.sqrt( 1./ (np.pi**2 * r *kernel_radius ))
def ooftensor(img, radii, memory_save=True):
'''
type: oof, bg
'''
# sigma = 1 # TODO: Pixel spacing
eps = 1e-12
# ntype = 1 # The type of normalisation
fimg = fftn(img, overwrite_x=True)
shiftmat = ifftshiftedcoormatrix(fimg.shape)
x, y, z = shiftmat
x = x / fimg.shape[0]
y = y / fimg.shape[1]
z = z / fimg.shape[2]
kernel_radius = np.sqrt(x ** 2 + y ** 2 + z ** 2) + eps # The distance from origin
for r in radii:
# Make the fourier convolutional kernel
jvbuffer = oofftkernel(kernel_radius, r) * fimg
if memory_save:
# F11
buffer = ifftshiftedcoordinate(img.shape, 0) ** 2 * x * x * jvbuffer
buffer = ifft(buffer, axis=0)
buffer = ifft(buffer, axis=1)
buffer = ifft(buffer, axis=2)
f11 = buffer.copy()
# F12
buffer = ifftshiftedcoordinate(img.shape, 0) * ifftshiftedcoordinate(img.shape, 1) * x * y * jvbuffer
buffer = ifft(buffer, axis=0)
buffer = ifft(buffer, axis=1)
buffer = ifft(buffer, axis=2)
f12 = buffer.copy()
# F13
buffer = ifftshiftedcoordinate(img.shape, 0) * ifftshiftedcoordinate(img.shape, 2) * x * z * jvbuffer
buffer = ifft(buffer, axis=0)
buffer = ifft(buffer, axis=1)
buffer = ifft(buffer, axis=2)
f13 = buffer.copy()
# F22
buffer = ifftshiftedcoordinate(img.shape, 1) ** 2 * y ** 2 * jvbuffer
buffer = ifft(buffer, axis=0)
buffer = ifft(buffer, axis=1)
buffer = ifft(buffer, axis=2)
f22 = buffer.copy()
# F23
buffer = ifftshiftedcoordinate(img.shape, 1) * ifftshiftedcoordinate(img.shape, 2) * y * z * jvbuffer
buffer = ifft(buffer, axis=0)
buffer = ifft(buffer, axis=1)
buffer = ifft(buffer, axis=2)
f23 = buffer.copy()
# F33
buffer = ifftshiftedcoordinate(img.shape, 2) * ifftshiftedcoordinate(img.shape, 2) * z * z * jvbuffer
buffer = ifft(buffer, axis=0)
buffer = ifft(buffer, axis=1)
buffer = ifft(buffer, axis=2)
f33 = buffer.copy()
else:
f11 = np.real(ifftn(x * x * jvbuffer))
f12 = np.real(ifftn(x * y * jvbuffer))
f13 = np.real(ifftn(x * z * jvbuffer))
f22 = np.real(ifftn(y * y * jvbuffer))
f23 = np.real(ifftn(y * z * jvbuffer))
f33 = np.real(ifftn(z * z * jvbuffer))
yield [f11, f12, f13, f22, f23, f33]
# The dimension is a vector specifying the size of the returned coordinate
# matrices. The number of output argument is equals to the dimensionality
# of the vector "dimension". All the dimension is starting from "1"
def ifftshiftedcoormatrix(shape):
shape = np.asarray(shape)
p = np.floor(np.asarray(shape) / 2).astype('int')
coord = []
for i in range(shape.size):
a = np.hstack((np.arange(p[i], shape[i]), np.arange(0, p[i]))) - p[i] - 1.
repmatpara = np.ones((shape.size,)).astype('int')
repmatpara[i] = shape[i]
A = a.reshape(repmatpara)
repmatpara = shape.copy()
repmatpara[i] = 1
coord.append(np.tile(A, repmatpara))
return coord
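# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Shape check only: for a (4, 6, 8) volume the helper returns three coordinate
# matrices, each tiled out to the full (4, 6, 8) shape.
def _demo_ifftshiftedcoormatrix():
    coords = ifftshiftedcoormatrix((4, 6, 8))
    assert len(coords) == 3
    assert all(c.shape == (4, 6, 8) for c in coords)
    return coords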
def ifftshiftedcoordinate(shape, axis):
shape = np.asarray(shape)
p = np.floor(np.asarray(shape) / 2).astype('int')
    a = (np.hstack((np.arange(p[axis], shape[axis]), np.arange(0, p[axis]))) - p[axis] - 1.)
'''
Author: Dr. <NAME>
Required packages: scikit-learn, scipy, numpy
This library contains a class for individual pointclouds ( ProcessPC() ), and classes for lists of pointclouds
( iterator_points_pointlabels() , iterator_points() , iterator_binaryVoxels() , iterator_binaryVoxels_pointlabels() ).
ProcessPC() is useful for various applications where you want a single pointcloud to be an object, and you wish you
to mutate that object in different ways (e.g. voxelise, rasterise, normalise, rotate, etc.)
The iterator classes are designed for machine learning applications. They are useful as iterators when you want to
use several pointclouds to train (or predict with) a machine learning model (e.g. return the next batch of
pointclouds to train on).
'''
import numpy as np
import copy
import random
from scipy import signal #only for bev vertical density
from scipy import ndimage #only for bev max height
from sklearn.neighbors import KDTree # only for ground normalisation
# Insert another class which, given a list of pointclouds, splits data into training and val upon init.
# Contains get batch, when called it, pulls random batch of pointclouds as list, loops through list using ProcessPC
def gaussian_kernel(size,mu=0.0,sigma=1.0):
x, y = np.meshgrid(np.linspace(-1, 1, size), np.linspace(-1, 1, size))
d = np.sqrt(x * x + y * y)
g = np.exp(-((d - mu) ** 2 / (2.0 * sigma ** 2)))
return g
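# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# gaussian_kernel() is evaluated on a [-1, 1] x [-1, 1] grid, so for an odd
# size the largest value sits at the centre pixel and equals exp(0) = 1.
def _demo_gaussian_kernel():
    g = gaussian_kernel(5)
    assert g.shape == (5, 5)
    assert abs(g[2, 2] - 1.0) < 1e-12
    assert g[2, 2] == g.max()
    return g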
class iterator_points_pointlabels():
def __init__(self, xyz_list=None,labels=None,returns=None, numClasses=None, batchsize=None ):
self.data_num = len(xyz_list)
if numClasses is None:
self.nb_class = np.size(np.unique(labels[0])) # only counts number of classes in first pcd
else:
self.nb_class = numClasses
self.pc_list = []
for i in range(len(xyz_list)):
if (labels is None) & (returns is None):
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i]))
elif (labels is None) & (returns is not None):
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i], pc_returns=returns[i]))
elif (labels is not None) & (returns is None):
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i], pc_labels=labels[i]))
else:
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i],pc_labels=labels[i],pc_returns=returns[i]))
if labels is None:
self.flag_label = 0
else:
self.flag_label = 1
if returns is None:
self.flag_return = 0
else:
self.flag_return = 1
if batchsize is not None:
self.batchsize = batchsize
else:
self.batchsize = self.data_num
self.current_batch = np.arange(self.batchsize)
def next_batch( self, augment=False, pre_process=False, addAxis=False ):
# augments the current batch once
# use with network_pointnet_bs1 (only works for batchsize of 1)
pc_temp = copy.deepcopy(self.pc_list[self.current_batch[0]])
if augment==True:
pc_temp.augmentation_Rotation()
pc_temp.augmentation_Flipping()
pc_batch = pc_temp.pc.copy()
if addAxis: # if using bsK, want output to be K x 1 x N x 3, where k=1
pc_batch = pc_batch[np.newaxis,...]
pc_batch = pc_batch[np.newaxis, ...]
if pre_process:
            pc_batch[0, ...] -= np.min(pc_batch[0, ...], axis=0)
            pc_batch[0, ...] /= np.max(pc_batch[0, ...])
if self.flag_label == 1:
labels = np.array(pc_temp.pc_labels)
labels_onehot = np.zeros((len(labels), self.nb_class))
labels_onehot[np.arange(len(labels)), labels] = 1
if self.flag_return == 1:
pc_returns_batch = np.array(pc_temp.pc_returns)
# update current batch
self.current_batch += self.batchsize
self.current_batch[self.current_batch >= self.data_num] = \
self.current_batch[self.current_batch >= self.data_num] - self.data_num
if (self.flag_label == 1) & (self.flag_return == 0):
return pc_batch, labels_onehot, labels
elif (self.flag_label == 0) & (self.flag_return == 1):
return pc_batch, pc_returns_batch
elif (self.flag_label == 1) & (self.flag_return == 1):
return pc_batch, labels_onehot, labels, pc_returns_batch
else:
return pc_batch
def next_batch2( self, augment=False, numAugs=0, pre_process=False, angle_x_randLim=0, angle_y_randLim=0, normalise=False, newAxis_loc=1 ):
# appends numAugs different augmentations to the current batch
n_points = self.pc_list[self.current_batch[0]].pc.shape[0]
pc_batch = np.empty( ( [self.batchsize*(numAugs+1),n_points,3] ) )
labels = np.empty( ( [self.batchsize*(numAugs+1),n_points] ) )
returns_batch = np.empty( ( [self.batchsize*(numAugs+1),n_points] ) )
for j in range(numAugs+1):
for i in range(self.batchsize):
pc_temp = copy.deepcopy(self.pc_list[self.current_batch[i]])
if (augment==True)&(j>0): # leave one set un-augmented
pc_temp.augmentation_Rotation(angle_x_randLim=angle_x_randLim, angle_y_randLim=angle_y_randLim)
pc_temp.augmentation_Flipping()
if normalise:
pc_temp.normalisation()
pc_batch[(j*self.batchsize)+i,...] = pc_temp.pc.copy()
labels[(j*self.batchsize)+i,...] = pc_temp.pc_labels.copy()
if self.flag_return == 1:
returns_batch[(j * self.batchsize) + i, ...] = pc_temp.pc_returns.copy()
# pre-process
if pre_process:
pc_batch[0, ...] -= np.min(pc_batch[i, ...], axis=0)
pc_batch[0, ...] /= np.max(pc_batch[i, ...])
if newAxis_loc == 1:
pc_batch = pc_batch[:, np.newaxis, ...]
elif newAxis_loc == 0:
pc_batch = pc_batch[np.newaxis, ...]
#labels = np.array(pc_temp.pc_labels)
#labels = np.tile(labels[:,np.newaxis],(1,numAugs+1))
labels_onehot = np.zeros((self.batchsize*(numAugs+1) , n_points , self.nb_class))
#labels_onehot[:,np.arange(n_points), labels.astype(np.int).T] = 1
xv, yv = np.meshgrid(np.arange(0, (self.batchsize*(numAugs+1))), np.arange(0, n_points))
        labels_onehot[np.ravel(xv), np.ravel(yv), np.ravel(labels.astype(int).T)] = 1
#labels = np.tile(labels[np.newaxis,:],[numAugs+1,1])
# update current batch
self.current_batch += self.batchsize
self.current_batch[self.current_batch >= self.data_num] = \
self.current_batch[self.current_batch >= self.data_num] - self.data_num
if self.flag_return == 1:
return pc_batch, labels_onehot,labels, returns_batch
else:
return pc_batch, labels_onehot,labels
def get_pc(self, idx=[1], augment=False, angle_x=0, angle_y=0, angle_z=30, pre_process=False, addAxis=False, normalise=False, newAxis_loc=1 ):
# default not to augment, but if so you can specify the rotations. Default rotation only about z
n_points = self.pc_list[idx[0]].pc.shape[0]
pc_batch = np.empty(([len(idx), n_points, 3]))
if self.flag_label==1:
labels = np.empty(([len(idx), n_points]))
if self.flag_return == 1:
returns_batch = np.empty(([len(idx), n_points]))
for i in range(len(idx)):
pc_temp = copy.deepcopy(self.pc_list[idx[i]])
if augment==True:
pc_temp.augmentation_Rotation(angle_x=angle_x, angle_y=angle_y, angle_z=angle_z)
#pc_temp.augmentation_Flipping()
if normalise:
pc_temp.normalisation()
pc_batch[i,...] = pc_temp.pc.copy()
if self.flag_label == 1:
labels[i,:] = np.array(pc_temp.pc_labels)
if self.flag_return == 1:
returns_batch[i,:] = np.array(pc_temp.pc_returns)
if addAxis:
if newAxis_loc == 1:
pc_batch = pc_batch[:, np.newaxis, ...]
elif newAxis_loc == 0:
pc_batch = pc_batch[np.newaxis, ...]
if self.flag_label == 1:
labels_onehot = np.zeros((len(idx), n_points, self.nb_class))
xv, yv = np.meshgrid(np.arange(0, (len(idx))), np.arange(0, n_points))
            labels_onehot[np.ravel(xv), np.ravel(yv), np.ravel(labels.astype(int).T)] = 1
if (self.flag_label == 1) & (self.flag_return == 0):
return pc_batch, labels_onehot, labels
elif (self.flag_label == 0) & (self.flag_return == 1):
return pc_batch, returns_batch
elif (self.flag_label == 1) & (self.flag_return == 1):
return pc_batch, labels_onehot, labels, returns_batch
else:
return pc_batch
def indexed_batch( self, idx=[1], augment=False, numAugs=0, pre_process=False, angle_x_randLim=0, angle_y_randLim=0, normalise=False, newAxis_loc=1, adapt_num_classes=False ):
# appends numAugs different augmentations to the current batch
n_points = self.pc_list[idx[0]].pc.shape[0]
pc_batch = np.empty( ( [len(idx)*(numAugs+1),n_points,3] ) )
labels = np.empty( ( [len(idx)*(numAugs+1),n_points] ) )
returns_batch = np.empty(([len(idx)*(numAugs+1),n_points]))
for j in range(numAugs+1):
for i in range(len(idx)):
pc_temp = copy.deepcopy(self.pc_list[idx[i]])
if (augment==True)&(j>0): # leave one set un-augmented
pc_temp.augmentation_Rotation(angle_x_randLim=angle_x_randLim, angle_y_randLim=angle_y_randLim)
pc_temp.augmentation_Flipping()
#pc_temp.augmentation_Shuffle()
if normalise:
pc_temp.normalisation()
pc_batch[(j*len(idx))+i,...] = pc_temp.pc.copy()
labels[(j*len(idx))+i,...] = pc_temp.pc_labels.copy()
if self.flag_return == 1:
returns_batch[(j*len(idx))+i,...] = pc_temp.pc_returns.copy()
# pre-process
if pre_process:
pc_batch[0, ...] -= np.min(pc_batch[i, ...], axis=0)
pc_batch[0, ...] /= np.max(pc_batch[i, ...])
if newAxis_loc == 1:
pc_batch = pc_batch[:,np.newaxis, ...]
elif newAxis_loc == 0:
pc_batch = pc_batch[np.newaxis, ...]
#labels = np.array(pc_temp.pc_labels)
#labels = np.tile(labels[:,np.newaxis],(1,numAugs+1))
if adapt_num_classes: # allows number of classes (and hence size of onehot) to be modified each batch
self.nb_class = len(np.unique(labels))
labels_onehot = np.zeros((len(idx)*(numAugs+1) , n_points , self.nb_class))
#labels_onehot[:,np.arange(n_points), labels.astype(np.int).T] = 1
xv, yv = np.meshgrid(np.arange(0, (len(idx)*(numAugs+1))), np.arange(0, n_points))
labels_onehot[np.ravel(xv), np.ravel(yv), np.ravel( labels.astype(np.int).T )] = 1
#labels = np.tile(labels[np.newaxis,:],[numAugs+1,1])
if self.flag_return == 1:
return pc_batch, labels_onehot,labels, returns_batch
else:
return pc_batch, labels_onehot,labels
def reset_batch(self):
""" Resets the current batch to the beginning.
"""
self.current_batch = np.arange(self.batchsize)
def shuffle(self):
""" Randomly permutes all dataSamples (and corresponding targets).
"""
random.shuffle(self.pc_list)
class iterator_points():
def __init__(self, xyz_list=None,labels=None, batchsize=None ):
self.data_num = len(xyz_list)
self.nb_class = np.max(labels)+1
self.first_batch = 1
self.current_batch = []
self.labels = labels
self.pc_list = []
for i in range(len(xyz_list)):
self.pc_list.append(ProcessPC(xyz_list[i]))
if batchsize is not None:
self.batchsize = batchsize
else:
self.batchsize = self.data_num
self.current_batch = np.arange(self.batchsize)
def next_batch( self, augment=False, pre_process=False ):
# augments the current batch once
if self.first_batch:
self.current_batch = (np.arange(self.batchsize)).tolist()
self.first_batch = 0
else:
self.current_batch = (np.array(self.current_batch) + self.batchsize).tolist()
if self.current_batch[-1] > (self.data_num - self.batchsize):
self.first_batch = 1
pc_temp = copy.deepcopy(self.pc_list[self.current_batch[0]])
if augment==True:
pc_temp.augmentation_Rotation()
pc_temp.augmentation_Flipping()
pc_batch = pc_temp.pc.copy()
pc_batch = pc_batch[np.newaxis,...]
# pre-process - scale between [-1,1]
#if pre_process:
#og_batch = 2*(og_batch-0.5)
labels = np.array(self.labels)
labels = labels[self.current_batch]
labels_onehot = np.zeros((len(labels), self.nb_class))
labels_onehot[np.arange(len(labels)), labels] = 1
# update current batch
self.current_batch += self.batchsize
self.current_batch[self.current_batch >= self.data_num] = \
self.current_batch[self.current_batch >= self.data_num] - self.data_num
return pc_batch, labels_onehot, labels
def next_batch2( self, augment=False, numAugs=0, pre_process=False ):
# appends numAugs different augmentations to the current batch
if self.first_batch:
self.current_batch = (np.arange(self.batchsize)).tolist()
self.first_batch = 0
else:
self.current_batch = (np.array(self.current_batch) + self.batchsize).tolist()
if self.current_batch[-1] > (self.data_num - self.batchsize):
self.first_batch = 1
n_points = self.pc_list[self.current_batch[0]].pc.shape[0]
pc_batch = np.empty( ( [self.batchsize*(numAugs+1),n_points,3] ) )
for j in range(numAugs+1):
pc_temp = copy.deepcopy(self.pc_list[self.current_batch[0]])
if (augment==True)&(j>1): # leave one set un-augmented
pc_temp.augmentation_Rotation()
pc_temp.augmentation_Flipping()
pc_batch[j,...] = pc_temp.pc.copy()
# pre-process - scale between [-1,1]
#if pre_process:
#og_batch = 2*(og_batch-0.5)
labels = np.array(self.labels)
labels = labels[self.current_batch]
labels = np.tile(labels,(numAugs+1))
labels_onehot = np.zeros((len(labels), self.nb_class))
labels_onehot[np.arange(len(labels)), labels] = 1
# update current batch
self.current_batch += self.batchsize
self.current_batch[self.current_batch >= self.data_num] = \
self.current_batch[self.current_batch >= self.data_num] - self.data_num
return pc_batch, labels_onehot, labels
def get_pc(self, idx=[1], augment=False, angle_x=0, angle_y=0, angle_z=30):
# default not to augment, but if so you can specify the rotations. Default rotation only about z
pc_temp = copy.deepcopy(self.pc_list[idx[0]])
if augment==True:
pc_temp.augmentation_Rotation(angle_x=angle_x, angle_y=angle_y, angle_z=angle_z)
#pc_temp.augmentation_Flipping()
pc_batch = pc_temp.pc.copy()
pc_batch = pc_batch[np.newaxis, ...]
labels = np.array(self.labels)
labels = labels[idx]
labels_onehot = np.zeros((len(labels), self.nb_class))
labels_onehot[np.arange(len(labels)), labels] = 1
return pc_batch, labels_onehot, labels
def reset_batch(self):
""" Resets the current batch to the beginning.
"""
self.current_batch = np.arange(self.batchsize)
def shuffle(self):
""" Randomly permutes all dataSamples (and corresponding targets).
"""
if self.labels is not None:
zipped = list(zip(self.pc_list, self.labels))
random.shuffle(zipped)
self.pc_list, self.labels = list(zip(*zipped))
else:
random.shuffle(self.pc_list)
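# Usage sketch (illustrative only, not part of the original module): drive
# iterator_points with toy data. It assumes ProcessPC accepts a raw Nx3 xyz
# array, as the constructor above does; all sizes and names are made up.
def demo_iterator_points():
    """ Minimal sketch: build the iterator, shuffle, and pull one batch. """
    xyz_list = [np.random.rand(1024, 3) for _ in range(8)]   # 8 toy point clouds
    labels = np.random.randint(0, 3, size=8)                 # 3 classes
    it = iterator_points(xyz_list=xyz_list, labels=labels, batchsize=2)
    it.shuffle()
    # pc_batch: (1, n_points, 3); labels_onehot: (batchsize, nb_class)
    pc_batch, labels_onehot, labels_int = it.next_batch(augment=True)
    return pc_batch.shape, labels_onehot.shape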
class iterator_binaryVoxels_pointlabels():
def __init__(self, xyz_list=None,labels=None,returns=None, res=0.1, gridSize=[32,32,32], numClasses=None, batchsize=None):
# make sure to input more than one pcd
self.data_num = len(xyz_list)
if numClasses is None:
self.nb_class = np.size(np.unique(labels[0])) # only counts number of classes in first pcd
else:
self.nb_class = numClasses
self.pc_list = []
for i in range(len(xyz_list)):
if (labels is None) & (returns is None):
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i]))
elif (labels is None) & (returns is not None):
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i], pc_returns=returns[i]))
elif (labels is not None) & (returns is None):
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i], pc_labels=labels[i]))
else:
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i],pc_labels=labels[i],pc_returns=returns[i]))
self.res=res
self.gridSize=np.array(gridSize)
if labels is None:
self.flag_label = 0
else:
self.flag_label = 1
if returns is None:
self.flag_return = 0
else:
self.flag_return = 1
if batchsize is not None:
self.batchsize = batchsize
else:
self.batchsize = self.data_num
self.current_batch = np.arange(self.batchsize)
def next_batch( self, augment=False, pre_process=False, outputOffset=False ):
# augments the current batch once
og_batch = np.empty( ( [self.batchsize,1] + self.gridSize.tolist() ) )
offset = np.empty(([self.batchsize] + [3]))
if self.flag_label == 1:
og_labels_batch = np.empty(([self.batchsize, self.nb_class+1] + self.gridSize.tolist())) #+1 for free space
if self.flag_return == 1:
og_returns_batch = np.empty(([self.batchsize, 1] + self.gridSize.tolist()))
for i in range(self.batchsize):
pc_temp = copy.deepcopy(self.pc_list[self.current_batch[i]])
if augment==True:
pc_temp.augmentation_Rotation( )
pc_temp.augmentation_Flipping( )
pc_temp.occupancyGrid_Binary( res_=self.res, gridSize_=self.gridSize )
og_batch[i,...] = pc_temp.og.copy()
offset[i, :] = pc_temp._ProcessPC__centre
if self.flag_label == 1:
pc_temp.occupancyGrid_Labels()
og_labels_batch[i, ...] = pc_temp.og_labels.copy()
if self.flag_return == 1:
pc_temp.occupancyGrid_Returns()
og_returns_batch[i,...] = pc_temp.og_returns.copy()
# pre-process - scale between [-1,1]
if pre_process:
og_batch = 2*(og_batch-0.5)
# update current batch
self.current_batch += self.batchsize
self.current_batch[self.current_batch >= self.data_num] = \
self.current_batch[self.current_batch >= self.data_num] - self.data_num
if outputOffset is False:
if (self.flag_label == 1)&(self.flag_return == 0):
return og_batch, og_labels_batch
elif (self.flag_label == 0)&(self.flag_return == 1):
return og_batch, og_returns_batch
elif (self.flag_label == 1)&(self.flag_return == 1):
return og_batch, og_labels_batch, og_returns_batch
else:
return og_batch
else:
if (self.flag_label == 1)&(self.flag_return == 0):
return og_batch, og_labels_batch, offset
elif (self.flag_label == 0)&(self.flag_return == 1):
return og_batch, og_returns_batch, offset
elif (self.flag_label == 1)&(self.flag_return == 1):
return og_batch, og_labels_batch, og_returns_batch, offset
else:
return og_batch, offset
def next_batch2( self, augment=False, numAugs=0, pre_process=False,angle_x_randLim=0, angle_y_randLim=0 ):
# appends numAugs different augmentations to the current batch
og_batch = np.empty( ( [self.batchsize*(numAugs+1),1] + self.gridSize.tolist() ) )
if self.flag_label == 1:
og_labels_batch = np.empty( ( [self.batchsize*(numAugs+1),self.nb_class+1] + self.gridSize.tolist() ) )
if self.flag_return == 1:
og_returns_batch = np.empty(([self.batchsize * (numAugs + 1), 1] + self.gridSize.tolist()))
for j in range(numAugs+1):
for i in range(self.batchsize):
pc_temp = copy.deepcopy(self.pc_list[self.current_batch[i]])
# augment pointcloud
if (augment==True)&(j>0): # leave one set un-augmented
pc_temp.augmentation_Rotation(angle_x_randLim=angle_x_randLim, angle_y_randLim=angle_y_randLim )
pc_temp.augmentation_Flipping( )
# occupancy grid
pc_temp.occupancyGrid_Binary( res_=self.res, gridSize_=self.gridSize )
og_batch[(j*self.batchsize)+i,...] = pc_temp.og.copy()
# labelled occupancy grid
if self.flag_label == 1:
pc_temp.occupancyGrid_Labels()
og_labels_batch[(j*self.batchsize)+i, ...] = pc_temp.og_labels.copy()
# occupancy grid with returns
if self.flag_return == 1:
pc_temp.occupancyGrid_Returns()
og_returns_batch[(j*self.batchsize)+i,...] = pc_temp.og_returns.copy()
# pre-process - scale between [-1,1]
if pre_process:
og_batch = 2*(og_batch-0.5)
# update current batch
self.current_batch += self.batchsize
self.current_batch[self.current_batch >= self.data_num] = \
self.current_batch[self.current_batch >= self.data_num] - self.data_num
if (self.flag_label == 1) & (self.flag_return == 0):
return og_batch, og_labels_batch
elif (self.flag_label == 0) & (self.flag_return == 1):
return og_batch, og_returns_batch
elif (self.flag_label == 1) & (self.flag_return == 1):
return og_batch, og_labels_batch, og_returns_batch
else:
return og_batch
def get_pc(self, idx=[1,2,3], augment=False, angle_x=0, angle_y=0, angle_z=30, angle_x_randLim=0, angle_y_randLim=0, outputOffset=False ):
        # Defaults to no augmentation; if augmenting, you can specify the rotations (default is about z only; set to None for a random rotation).
        # Useful for inference because it doesn't need labels.
og_batch = np.empty( ( [len(idx),1] + self.gridSize.tolist() ) )
offset = np.empty( ( [len(idx)] + [3] ) )
if self.flag_label == 1:
og_labels_batch = np.empty(([len(idx), self.nb_class+1] + self.gridSize.tolist()))
if self.flag_return == 1:
og_returns_batch = np.empty(([len(idx), 1] + self.gridSize.tolist()))
for i in range(len(idx)):
pc_temp = copy.deepcopy(self.pc_list[idx[i]])
if augment==True:
pc_temp.augmentation_Rotation(angle_x=angle_x, angle_y=angle_y, angle_z=angle_z, angle_x_randLim=angle_x_randLim, angle_y_randLim=angle_y_randLim )
#pc_temp.augmentation_Flipping()
pc_temp.occupancyGrid_Binary( res_=self.res, gridSize_=self.gridSize )
og_batch[i,...] = pc_temp.og.copy()
offset[i,:] = pc_temp._ProcessPC__centre
if self.flag_label == 1:
pc_temp.occupancyGrid_Labels()
og_labels_batch[i,...] = pc_temp.og_labels.copy()
if self.flag_return == 1:
pc_temp.occupancyGrid_Returns()
og_returns_batch[i,...] = pc_temp.og_returns.copy()
if outputOffset is False:
if (self.flag_label == 1)&(self.flag_return == 0):
return og_batch, og_labels_batch
elif (self.flag_label == 0)&(self.flag_return == 1):
return og_batch, og_returns_batch
elif (self.flag_label == 1)&(self.flag_return == 1):
return og_batch, og_labels_batch, og_returns_batch
else:
return og_batch
else:
if (self.flag_label == 1)&(self.flag_return == 0):
return og_batch, og_labels_batch, offset
elif (self.flag_label == 0)&(self.flag_return == 1):
return og_batch, og_returns_batch, offset
elif (self.flag_label == 1)&(self.flag_return == 1):
return og_batch, og_labels_batch, og_returns_batch, offset
else:
return og_batch, offset
def reset_batch(self):
""" Resets the current batch to the beginning.
"""
        self.current_batch = np.arange(self.batchsize)
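# Usage sketch (illustrative only, not part of the original class): build binary
# occupancy grids with per-voxel labels from toy point clouds. Point counts,
# grid size, and class count below are assumptions for demonstration.
def demo_iterator_binaryVoxels_pointlabels():
    """ Minimal sketch: one labelled-voxel batch from random point clouds. """
    xyz_list = [np.random.rand(2048, 3) * 3.0 for _ in range(4)]     # 4 toy point clouds
    labels = [np.random.randint(0, 3, size=2048) for _ in range(4)]  # per-point labels
    it = iterator_binaryVoxels_pointlabels(xyz_list=xyz_list, labels=labels,
                                           res=0.1, gridSize=[32, 32, 32],
                                           numClasses=3, batchsize=2)
    og, og_labels = it.next_batch(augment=False)
    # og: (batchsize, 1, 32, 32, 32); og_labels: (batchsize, numClasses+1, 32, 32, 32)
    return og.shape, og_labels.shape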
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Planar Stacker domain."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import xml_tools
from lxml import etree
import numpy as np
_CLOSE = .01 # (Meters) Distance below which a thing is considered close.
_CONTROL_TIMESTEP = .01 # (Seconds)
_TIME_LIMIT = 10 # (Seconds)
_ARM_JOINTS = ['arm_root', 'arm_shoulder', 'arm_elbow', 'arm_wrist',
'finger', 'fingertip', 'thumb', 'thumbtip']
SUITE = containers.TaggedTasks()
def make_model(n_boxes):
"""Returns a tuple containing the model XML string and a dict of assets."""
xml_string = common.read_model('stacker.xml')
parser = etree.XMLParser(remove_blank_text=True)
mjcf = etree.XML(xml_string, parser)
# Remove unused boxes
for b in range(n_boxes, 4):
box = xml_tools.find_element(mjcf, 'body', 'box' + str(b))
box.getparent().remove(box)
return etree.tostring(mjcf, pretty_print=True), common.ASSETS
@SUITE.add('hard')
def stack_2(fully_observable=True, time_limit=_TIME_LIMIT, random=None,
environment_kwargs=None):
"""Returns stacker task with 2 boxes."""
n_boxes = 2
physics = Physics.from_xml_string(*make_model(n_boxes=n_boxes))
task = Stack(n_boxes=n_boxes,
fully_observable=fully_observable,
random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, control_timestep=_CONTROL_TIMESTEP, time_limit=time_limit,
**environment_kwargs)
@SUITE.add('hard')
def stack_4(fully_observable=True, time_limit=_TIME_LIMIT, random=None,
environment_kwargs=None):
"""Returns stacker task with 4 boxes."""
n_boxes = 4
physics = Physics.from_xml_string(*make_model(n_boxes=n_boxes))
task = Stack(n_boxes=n_boxes,
fully_observable=fully_observable,
random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, control_timestep=_CONTROL_TIMESTEP, time_limit=time_limit,
**environment_kwargs)
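# Usage sketch (illustrative only, not part of the original suite file): build the
# two-box stacker environment and step it with uniformly random actions. It relies
# only on the standard dm_control Environment/spec API used above; the rollout
# length is an arbitrary choice.
def demo_random_rollout(num_steps=10):
  """Runs a short random-action rollout and returns the final reward."""
  env = stack_2()
  action_spec = env.action_spec()
  time_step = env.reset()
  for _ in range(num_steps):
    action = np.random.uniform(action_spec.minimum, action_spec.maximum,
                               size=action_spec.shape)
    time_step = env.step(action)
  return time_step.reward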
class Physics(mujoco.Physics):
"""Physics with additional features for the Planar Manipulator domain."""
def bounded_joint_pos(self, joint_names):
"""Returns joint positions as (sin, cos) values."""
joint_pos = self.named.data.qpos[joint_names]
return np.vstack([np.sin(joint_pos), np.cos(joint_pos)]).T
def joint_vel(self, joint_names):
"""Returns joint velocities."""
return self.named.data.qvel[joint_names]
def body_2d_pose(self, body_names, orientation=True):
"""Returns positions and/or orientations of bodies."""
if not isinstance(body_names, str):
body_names = np.array(body_names).reshape(-1, 1) # Broadcast indices.
pos = self.named.data.xpos[body_names, ['x', 'z']]
if orientation:
ori = self.named.data.xquat[body_names, ['qw', 'qy']]
return np.hstack([pos, ori])
else:
return pos
def touch(self):
return np.log1p(self.data.sensordata)
def site_distance(self, site1, site2):
site1_to_site2 = np.diff(self.named.data.site_xpos[[site2, site1]], axis=0)
return np.linalg.norm(site1_to_site2)
class Stack(base.Task):
"""A Stack `Task`: stack the boxes."""
def __init__(self, n_boxes, fully_observable, random=None):
"""Initialize an instance of the `Stack` task.
Args:
n_boxes: An `int`, number of boxes to stack.
fully_observable: A `bool`, whether the observation should contain the
positions and velocities of the boxes and the location of the target.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._n_boxes = n_boxes
self._box_names = ['box' + str(b) for b in range(n_boxes)]
self._box_joint_names = ['_'.join([name, dim])
for name in self._box_names
for dim in 'xzy']
self._fully_observable = fully_observable
super(Stack, self).__init__(random=random)
def initialize_episode(self, physics):
"""Sets the state of the environment at the start of each episode."""
# Local aliases
randint = self.random.randint
uniform = self.random.uniform
model = physics.named.model
data = physics.named.data
# Find a collision-free random initial configuration.
penetrating = True
while penetrating:
# Randomise angles of arm joints.
      is_limited = model.jnt_limited[_ARM_JOINTS].astype(bool)
joint_range = model.jnt_range[_ARM_JOINTS]
      lower_limits = np.where(is_limited, joint_range[:, 0], -np.pi)
import os
import sys
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from torch.utils.data import Dataset
from sklearn.model_selection import train_test_split
import torch
def get_data_into_loaders(data_x, data_y, batch_size, DataSetClass, rand_seed=42, test_ratio=0.3):
"""
Helper function that takes structured data_x and data_y into dataloaders
:param data_x: the structured x data
    :param data_y: the structured y data
    :param batch_size: the batch size for the data loaders
    :param DataSetClass: the torch Dataset class used to wrap (x, y) pairs
    :param rand_seed: the random seed
    :param test_ratio: the testing ratio
    :return: train_loader, test_loader: the pytorch data loaders
"""
    # Split the data into training and testing sets
x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=test_ratio,
random_state=rand_seed)
print('total number of training sample is {}, the dimension of the feature is {}'.format(len(x_train), len(x_train[0])))
print('total number of test sample is {}'.format(len(y_test)))
# Construct the dataset using a outside class
train_data = DataSetClass(x_train, y_train)
test_data = DataSetClass(x_test, y_test)
# Construct train_loader and test_loader
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size)
return train_loader, test_loader
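def demo_get_data_into_loaders():
    """
    Usage sketch (illustrative only): wrap random arrays in a minimal Dataset and
    build loaders with get_data_into_loaders. _ToyDataSet below is a hypothetical
    stand-in for the dataset classes used elsewhere in this module
    (e.g. SimulatedDataSet_regress); shapes and batch size are arbitrary.
    """
    class _ToyDataSet(Dataset):
        def __init__(self, x, y):
            self.x, self.y = x, y
        def __len__(self):
            return len(self.x)
        def __getitem__(self, ind):
            return self.x[ind], self.y[ind]
    data_x = np.random.rand(100, 8).astype('float32')
    data_y = np.random.rand(100, 3).astype('float32')
    train_loader, test_loader = get_data_into_loaders(
        data_x, data_y, batch_size=16, DataSetClass=_ToyDataSet, test_ratio=0.3)
    for xb, yb in train_loader:
        print('first training batch:', xb.shape, yb.shape)
        break
    return train_loader, test_loader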
def normalize_np(x):
"""
Normalize the x into [-1, 1] range in each dimension [:, i]
:param x: np array to be normalized
:return: normalized np array
"""
for i in range(len(x[0])):
x_max = np.max(x[:, i])
x_min = np.min(x[:, i])
x_range = (x_max - x_min ) /2.
x_avg = (x_max + x_min) / 2.
x[:, i] = (x[:, i] - x_avg) / x_range
print("In normalize_np, row ", str(i), " your max is:", np.max(x[:, i]))
print("In normalize_np, row ", str(i), " your min is:", np.min(x[:, i]))
assert np.max(x[:, i]) - 1 < 0.0001, 'your normalization is wrong'
assert np.min(x[:, i]) + 1 < 0.0001, 'your normalization is wrong'
return x
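def demo_normalize_np():
    """
    Usage sketch (illustrative only): each column of a toy array is rescaled to
    span exactly [-1, 1], which is what normalize_np asserts above. The values
    below are made up.
    """
    x = np.array([[30., 0.],
                  [50., 5.],
                  [70., 10.]], dtype='float32')
    x_norm = normalize_np(x.copy())
    # column 0: 30 -> -1, 50 -> 0, 70 -> 1 ; column 1: 0 -> -1, 5 -> 0, 10 -> 1
    return x_norm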
def read_data_ADM(flags, eval_data_all=False):
if flags.test_ratio == 0:
# Read the data
data_dir = os.path.join(flags.data_dir, 'ADM_60k', 'eval')
test_x = pd.read_csv(os.path.join(data_dir, 'test_x.csv'), header=None).astype('float32').values
test_y = pd.read_csv(os.path.join(data_dir, 'test_y.csv'), header=None).astype('float32').values
test_x = normalize_np(test_x)
print("shape of test_x", np.shape(test_x))
print("shape of test_y", np.shape(test_y))
test_y = test_y[:,:2000]
return get_data_into_loaders(test_x, test_y, flags.batch_size, SimulatedDataSet_regress, test_ratio=0.999)
# Read the data
data_dir = os.path.join(flags.data_dir, 'ADM_60k')
data_x = pd.read_csv(os.path.join(data_dir, 'data_x.csv'), header=None).astype('float32').values
data_y = pd.read_csv(os.path.join(data_dir, 'data_y.csv'), header=None).astype('float32').values
    # Normalize the geometry inputs to [-1, 1] in each dimension
data_x = normalize_np(data_x)
print("shape of data_x", np.shape(data_x))
data_y = data_y[:,:2000]
print("shape of data_y", np.shape(data_y))
if eval_data_all:
return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, test_ratio=0.999)
return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, test_ratio=flags.test_ratio)
def read_data_peurifoy(flags, eval_data_all=False):
"""
Data reader function for the gaussian mixture data set
:param flags: Input flags
:return: train_loader and test_loader in pytorch data set format (normalized)
"""
if flags.test_ratio == 0:
# Read the data
data_dir = os.path.join(flags.data_dir, 'Peurifoy', 'eval')
test_x = pd.read_csv(os.path.join(data_dir, 'test_x.csv'), header=None).astype('float32').values
test_y = pd.read_csv(os.path.join(data_dir, 'test_y.csv'), header=None).astype('float32').values
test_x = (test_x-50)/20.
print("shape of test_x", np.shape(test_x))
print("shape of test_y", np.shape(test_y))
return get_data_into_loaders(test_x, test_y, flags.batch_size, SimulatedDataSet_regress, test_ratio=0.999)
# Read the data
data_dir = os.path.join(flags.data_dir, 'Peurifoy')
data_x = pd.read_csv(os.path.join(data_dir, 'data_x.csv'), header=None).astype('float32').values
data_y = pd.read_csv(os.path.join(data_dir, 'data_y.csv'), header=None).astype('float32').values
data_x = (data_x-50)/20.
print("shape of data_x", np.shape(data_x))
print("shape of data_y", np.shape(data_y))
if eval_data_all:
return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, test_ratio=0.999)
return get_data_into_loaders(data_x, data_y, flags.batch_size, SimulatedDataSet_regress, test_ratio=flags.test_ratio)
def read_data_color(flags, eval_data_all=False):
if flags.test_ratio == 0:
# Read the data
data_dir = os.path.join(flags.data_dir, 'color', 'eval')
test_x = pd.read_csv(os.path.join(data_dir, 'test_x.csv'), header=None).astype('float32').values
test_y = pd.read_csv(os.path.join(data_dir, 'test_y.csv'), header=None).astype('float32').values
#test_x = normalize_np(test_x)
print("shape of test_x", np.shape(test_x))
print("shape of test_y", | np.shape(test_y) | numpy.shape |
#!/usr/bin/env python3
from netcdfTools import *
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import rfft, irfft, fftfreq
from utilities import filesFromList, writeLog
'''
Description:
Author: <NAME>
<EMAIL>
University of Helsinki &
Finnish Meteorological Institute
'''
#==========================================================#
parser = argparse.ArgumentParser(prog='fourierFilterNetCdf.py')
parser.add_argument("fileKey", default=None,\
help="Search string for collecting files.")
parser.add_argument("-o", "--outstr",type=str, default="FFlt_",\
help="Prefix for the output NETCDF file. Default=FFlt_.")
parser.add_argument("-vn", "--varnames",type=str, nargs='+', default=['u'],\
help="Names of the V or V^prime comps in (x,y,z)-order. Default = ['u'].")
parser.add_argument("-vc", "--varcopy",type=str, nargs='+', default=None,\
help="Names of the variables which copied to the output file without filtering.")
parser.add_argument("-lf", "--lowfreq", type=float, default=0.01,\
help="Low frequency cutoff. FFT coefs will be zeroed for frequecies below this value.")
parser.add_argument("-c", "--coarse", type=int, default=1,\
help="Coarsening level. Int > 1. Default = 1.")
args = parser.parse_args()
writeLog( parser, args )
#==========================================================#
# Initial renaming operations and variable declarations
fileKey = args.fileKey
outstr = args.outstr
varnames = args.varnames
varcopy = args.varcopy
lowfreq = args.lowfreq
cl = abs(int(args.coarse))
'''
Establish two boolean variables which indicate whether the created variable is an
independent or dependent variable in function createNetcdfVariable().
'''
voDict = dict()
parameter = True; variable = False
# Obtain a list of files to include.
fileNos, fileList = filesFromList( fileKey+'*' )
for fn in fileNos:
fileout = outstr+fileList[fn].split('_')[-1]
parameter = True
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Create a NETCDF output dataset (dso) for writing out the data.
dso = netcdfOutputDataset( fileout )
for vn in varnames:
# Read in data.
dataDict = read3dDataFromNetCDF( fileList[fn] , [vn], cl )
vp = dataDict[vn]
if( parameter ):
# Coords and time:
x = dataDict.pop('x'); y = dataDict.pop('y'); z = dataDict.pop('z')
time = dataDict.pop('time'); time_dim = len(time)
dataDict = None
if( parameter ):
# Create the output independent variables right away and empty memory.
tv = createNetcdfVariable( dso, time,'time', time_dim,'s','f4',('time',), parameter )
xv = createNetcdfVariable( dso, x , 'x' , len(x) , 'm', 'f4', ('x',) , parameter )
x = None
yv = createNetcdfVariable( dso, y , 'y' , len(y) , 'm', 'f4', ('y',) , parameter )
y = None
zv = createNetcdfVariable( dso, z , 'z' , len(z) , 'm', 'f4', ('z',) , parameter )
z = None
parameter = False
# If our original signal time was in seconds, this is now in Hz
vfreq = fftfreq(len(vp[:,10,0,0]), d=time[1]-time[0])
Fvp = rfft(vp, axis=(0))
Fvp[(np.abs(vfreq)<lowfreq),:,:,:] = 0 # Filter step.
vpf = irfft(Fvp, axis=(0)) + np.mean( vp, axis=(0) )
Fvp = None
'''
plt.figure(1)
vm = np.mean( vp[:,10,0,0] )
plt.plot(time,vp[:,10,0,0],'b', time, vm+vpf[:,10,0,0],'r')
plt.figure(2)
plt.semilogy(vfreq, Fvp[:,0,0,0]); plt.show()
'''
# Filtered value:
voDict[vn] = createNetcdfVariable(\
dso, vpf, vn, time_dim, '[-]', 'f4',('time','z','y','x',) , variable )
# - - - - Done , finalize the output - - - - - - - - - -
  for vc in (varcopy or []):   # skip gracefully if no copy-variables were given
dataDict = read3dDataFromNetCDF( fileList[fn] , [vc], cl )
vpc = dataDict.pop(vc)
if( len(np.shape( vpc )) == 4 ):
voDict[vc] = createNetcdfVariable(dso, vpc, vc, time_dim,'[-]','f4',('time','z','y','x',), variable)
    elif( len(np.shape( vpc )) == 3 ):
      voDict[vc] = createNetcdfVariable(dso, vpc, vc, time_dim,'[-]','f4',('time','y','x',), variable)
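def demoFourierFilter( lowfreq=0.05 ):
  '''
  Illustrative sketch only (not part of the original script): apply the same
  low-frequency zeroing idea to a synthetic 1-D signal. The rfft/irfft and
  fftfreq calls mirror the filter step applied to the NetCDF variables above;
  the sampling interval, signal and cutoff are arbitrary choices.
  '''
  dt   = 0.1
  time = np.arange(0., 200., dt)
  vp   = np.sin(2.*np.pi*0.5*time) + 0.5*np.sin(2.*np.pi*0.01*time) # fast + slow drift
  vfreq = fftfreq(len(vp), d=dt)
  Fvp   = rfft(vp)
  Fvp[(np.abs(vfreq)<lowfreq)] = 0.  # zero the coefficients below the cutoff
  vpf   = irfft(Fvp) + np.mean(vp)
  return time, vp, vpf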
import numpy as np
import matplotlib.pyplot as plt
import torch
#################################################
# sinusoid regression, from original MAML paper #
#################################################
# Quote from the paper:
# the amplitude varies within [0.1, 5.0] and the phase varies within [0, π],
# and the input and output both have a dimensionality of 1.
# During training and testing, datapoints x are sampled uniformly from [−5.0, 5.0].
# The loss is the mean-squared error
# between the prediction f(x) and true value.
# The regressor is a neural network model with 2 hidden layers of size
# 40 with ReLU nonlinearities. When training with MAML,
# we use one gradient update with K = 10 examples with
# a fixed step size α = 0.01, and use Adam as the metaoptimizer"
def sinusoid_get_random_task():
    amplitude = np.random.uniform(0.1, 5.0)
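# A minimal sketch (not from the original file): assuming a task is fully described
# by an (amplitude, phase) pair as quoted above, K support points for one task
# could be drawn like this. The helper name `sinusoid_sample_batch` is hypothetical.
def sinusoid_sample_batch(amplitude, phase, k=10):
    # inputs sampled uniformly from [-5, 5]; targets are the noiseless sinusoid
    x = np.random.uniform(-5.0, 5.0, size=(k, 1)).astype(np.float32)
    y = (amplitude * np.sin(x + phase)).astype(np.float32)
    return torch.from_numpy(x), torch.from_numpy(y)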
import os
import sys
import obspy
import scipy
import pyasdf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.fftpack import next_fast_len
from obspy.signal.filter import bandpass
from seisgo import noise, stacking,utils
import pygmt as gmt
from obspy import UTCDateTime
def plot_eventsequence(cat,figsize=(12,4),ytype='magnitude',figname=None,
yrange=None,save=False,stem=True):
if isinstance(cat,obspy.core.event.catalog.Catalog):
cat=pd.DataFrame(utils.qml2list(cat))
elif isinstance(cat,list):
cat=pd.DataFrame(cat)
#All magnitudes greater than or equal to the limit will be plotted
plt.figure(figsize=figsize)
plt.title(ytype+" vs. time")
plt.xlabel("Date (UTC)")
plt.ylabel(ytype)
if yrange is not None:
ymin,ymax=yrange
if ytype.lower()=="magnitude":
cat2=cat[(cat.magnitude>=yrange[0]) & (cat.magnitude<=yrange[1]) ]
elif ytype.lower()=="depth":
cat2=cat[(cat.depth>=yrange[0]) & (cat.depth<=yrange[1]) ]
else:
cat2=cat
if ytype.lower()=="magnitude":
ymin=np.min(cat2.magnitude)*0.9
ymax=np.max(cat2.magnitude)*1.1
elif ytype.lower()=="depth":
ymin=np.min(cat2.depth)*0.9
ymax=np.max(cat2.depth)*1.1
t=[]
for i in range(len(cat2)):
tTime=obspy.UTCDateTime(cat2.iloc[i]["datetime"])
t.append(tTime.datetime)
if stem:
if ytype.lower()=="magnitude":
markerline, stemlines, baseline=plt.stem(t,cat2.magnitude,linefmt='k-',markerfmt="o",
bottom=ymin)
elif ytype.lower()=="depth":
markerline, stemlines, baseline=plt.stem(t,cat2.depth,linefmt='k-',markerfmt="o",
bottom=ymin)
markerline.set_markerfacecolor('r')
markerline.set_markeredgecolor('r')
else:
if ytype.lower()=="magnitude":
plt.scatter(t,cat2.magnitude,5,'k')
elif ytype.lower()=="depth":
plt.scatter(t,cat2.depth,cat2.magnitude,'k')
#
plt.grid(axis="both")
plt.ylim([ymin,ymax])
if save:
if figname is not None:
plt.savefig(figname)
else:
plt.savefig(ytype+"_vs_time.png")
else:
plt.show()
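def demo_plot_eventsequence(quakeml_file):
    """
    Usage sketch (illustrative only): read a QuakeML catalog and show a
    magnitude-versus-time stem plot for M>=4 events. The file path is a
    placeholder supplied by the caller; the magnitude range is arbitrary.
    """
    cat = obspy.read_events(quakeml_file)
    plot_eventsequence(cat, ytype='magnitude', yrange=[4, 9], stem=True)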
def plot_stations(lon,lat,region,markersize="c0.2c",title="station map",style="fancy",figname=None,
format='png',distance=None,projection="M5i", xshift="6i",frame="af"):
"""
lon, lat: could be list of vectors contaning multiple sets of stations. The number of sets must be the same
as the length of the marker list.
marker: a list specifying the symbols for each station set.
region: [minlon,maxlon,minlat,maxlat] for map view
"""
nsta=len(lon)
if isinstance(markersize,str):
markersize=[markersize]*nsta
fig = gmt.Figure()
gmt.config(MAP_FRAME_TYPE=style)
for i in range(nsta):
if i==0:
fig.coast(region=region, resolution="f",projection=projection, rivers='rivers',
water="cyan",frame=frame,land="white",
borders=["1/0.5p,gray,2/1p,gray"])
fig.basemap(frame='+t"'+title+'"')
fig.plot(
x=lon[i],
y=lat[i],
style=markersize[i],
color="red",
)
if figname is None:
figname='stationmap.'+format
fig.savefig(figname)
print('plot was saved to: '+figname)
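def demo_plot_stations():
    """
    Usage sketch (illustrative only): two station sets plotted with different
    GMT symbols on one map. All coordinates, symbols and the output name below
    are made up for demonstration.
    """
    lon = [[-122.1, -121.8, -122.4], [-121.5]]
    lat = [[37.2, 37.5, 37.9], [36.9]]
    region = [-123.5, -120.5, 36.5, 38.5]
    plot_stations(lon, lat, region, markersize=["c0.2c", "t0.3c"],
                  title="toy station map", figname="toy_stationmap.png")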
##plot power spectral density
def plot_psd(data,dt,labels=None,xrange=None,cmap='jet',normalize=True,figsize=(13,5),\
save=False,figname=None,tick_inc=None):
"""
Plot the power specctral density of the data array.
=PARAMETERS=
data: 2-D array containing the data. the data to be plotted should be on axis 1 (second dimention)
dt: sampling inverval in time.
labels: row labels of the data, default is None.
cmap: colormap, default is 'jet'
time_format: format to show time marks, default is: '%Y-%m-%dT%H'
normalize: whether normalize the PSD in plotting, default is True
figsize: figure size, default: (13,5)
"""
data=np.array(data)
if data.ndim > 2:
        raise ValueError('only plot 1-d array or 2-d matrix for now. the input data has a dimension of %d'%(data.ndim))
f,psd=utils.psd(data,1/dt)
f=f[1:]
plt.figure(figsize=figsize)
ax=plt.subplot(111)
if data.ndim==2:
nwin=data.shape[0]
if tick_inc is None:
if nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
psdN=np.ndarray((psd.shape[0],psd.shape[1]-1))
for i in range(psd.shape[0]):
if normalize: psdN[i,:]=psd[i,1:]/np.max(np.abs(psd[i,1:]))
else: psdN[i,:]=psd[i,1:]
plt.imshow(psdN,aspect='auto',extent=[f.min(),f.max(),psdN.shape[0],0],cmap=cmap)
ax.set_yticks(np.arange(0,nwin,step=tick_inc))
if labels is not None: ax.set_yticklabels(labels[0:nwin:tick_inc])
if normalize: plt.colorbar(label='normalized PSD')
else: plt.colorbar(label='PSD')
else:
if normalize: psdN=psd[1:]/np.max(np.abs(psd[1:]))
        else: psdN=psd[1:]
plt.plot(f,psdN)
if xrange is None:plt.xlim([f[1],f[-1]])
else:
plt.xlim(xrange)
plt.xscale('log')
plt.xlabel('frequency (Hz)')
plt.title('PSD')
if save:
if figname is not None:
plt.savefig(figname)
else:
plt.savefig("PSD.png")
else:
plt.show()
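def demo_plot_psd():
    """
    Usage sketch (illustrative only): PSD image of three noisy sine traces
    sampled at 100 Hz, one row per trace. Frequencies, noise level and the
    x-range are arbitrary choices.
    """
    dt = 0.01
    t = np.arange(0, 60, dt)
    data = np.vstack([np.sin(2*np.pi*f0*t) + 0.1*np.random.randn(t.size)
                      for f0 in (0.5, 1.0, 2.0)])
    plot_psd(data, dt, labels=['0.5 Hz', '1 Hz', '2 Hz'], xrange=[0.05, 10])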
#############################################################################
############### PLOTTING RAW SEISMIC WAVEFORMS ##########################
#############################################################################
'''
Inherited and modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_waveform(sfile,net,sta,freqmin,freqmax,save=False,figdir=None,format='pdf'):
'''
display the downloaded waveform for station A
PARAMETERS:
-----------------------
sfile: containing all wavefrom data for a time-chunck in ASDF format
net,sta,comp: network, station name and component
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
USAGE:
-----------------------
plot_waveform('temp.h5','CI','BLC',0.01,0.5)
'''
# open pyasdf file to read
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
sta_list = ds.waveforms.list()
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
# check whether station exists
tsta = net+'.'+sta
if tsta not in sta_list:
raise ValueError('no data for %s in %s'%(tsta,sfile))
tcomp = ds.waveforms[tsta].get_waveform_tags()
ncomp = len(tcomp)
if ncomp==0:
print('no data found for the specified net.sta.')
return None
tr = ds.waveforms[tsta][tcomp[0]]
dt = tr[0].stats.delta
npts = tr[0].stats.npts
tt = np.arange(0,npts)*dt
if ncomp == 1:
data = tr[0].data
data = bandpass(data,freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
fig=plt.figure(figsize=(9,3))
plt.plot(tt,data,'k-',linewidth=1)
plt.title('T\u2080:%s %s.%s.%s @%5.3f-%5.2f Hz' % (tr[0].stats.starttime,net,sta,tcomp[0].split('_')[0].upper(),freqmin,freqmax))
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')
plt.tight_layout()
plt.show()
else:
data = np.zeros(shape=(ncomp,npts),dtype=np.float32)
for ii in range(ncomp):
data[ii] = ds.waveforms[tsta][tcomp[ii]][0].data
data[ii] = bandpass(data[ii],freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
fig=plt.figure(figsize=(9,6))
for c in range(ncomp):
if c==0:
plt.subplot(ncomp,1,1)
plt.plot(tt,data[0],'k-',linewidth=1)
plt.title('T\u2080:%s %s.%s @%5.3f-%5.2f Hz' % (tr[0].stats.starttime,net,sta,freqmin,freqmax))
plt.legend([tcomp[0].split('_')[0].upper()],loc='upper left')
plt.xlabel('Time [s]')
else:
plt.subplot(ncomp,1,c+1)
plt.plot(tt,data[c],'k-',linewidth=1)
plt.legend([tcomp[c].split('_')[0].upper()],loc='upper left')
plt.xlabel('Time [s]')
fig.tight_layout()
if save:
if not os.path.isdir(figdir):os.mkdir(figdir)
sfilebase=sfile.split('/')[-1]
outfname = figdir+'/{0:s}_{1:s}.{2:s}'.format(sfilebase.split('.')[0],net,sta)
fig.savefig(outfname+'.'+format, format=format, dpi=300)
plt.close()
else:
fig.show()
#############################################################################
###############PLOTTING XCORR RESULTS AS THE OUTPUT OF SEISGO ##########################
#############################################################################
def plot_xcorr_substack(sfile,freqmin,freqmax,lag=None,comp='ZZ',
save=True,figdir=None):
'''
display the 2D matrix of the cross-correlation functions for a certain time-chunck.
PARAMETERS:
--------------------------
sfile: cross-correlation functions outputed by SeisGo workflow
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
USAGE:
--------------------------
plot_xcorr_substack('temp.h5',0.1,1,100,True,'./')
Note: IMPORTANT!!!! this script only works for cross-correlation with sub-stacks being set to True in S1.
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
# extract common variables
spairs = ds.auxiliary_data.list()
path_lists = ds.auxiliary_data[spairs[0]].list()
flag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['substack']
dt = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['dt']
maxlag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
# only works for cross-correlation with substacks generated
if not flag:
raise ValueError('seems no substacks have been done! not suitable for this plotting function')
# lags for display
if not lag:lag=maxlag
lag0=np.min([1.0*lag,maxlag])
if lag>maxlag:raise ValueError('lag excceds maxlag!')
# t is the time labels for plotting
if lag>=5:
tstep=int(int(lag)/5)
t1=np.arange(-int(lag),0,step=tstep)
t2=np.arange(0,int(lag+0.5*tstep),step=tstep)
t=np.concatenate((t1,t2))
else:
tstep=lag/5
t1=np.arange(-lag,0,step=tstep)
t2=np.arange(0,lag+0.5*tstep,step=tstep)
t=np.concatenate((t1,t2))
indx1 = int((maxlag-lag0)/dt)
indx2 = indx1+2*int(lag0/dt)+1
for spair in spairs:
ttr = spair.split('_')
net1,sta1 = ttr[0].split('.')
net2,sta2 = ttr[1].split('.')
path_lists = ds.auxiliary_data[spair].list()
for ipath in path_lists:
chan1,chan2 = ipath.split('_')
cc_comp=chan1[-1]+chan2[-1]
if cc_comp == comp or comp=='all' or comp=='ALL':
try:
dist = ds.auxiliary_data[spair][ipath].parameters['dist']
ngood= ds.auxiliary_data[spair][ipath].parameters['ngood']
ttime= ds.auxiliary_data[spair][ipath].parameters['time']
except Exception:
print('continue! something wrong with %s %s'%(spair,ipath))
continue
# cc matrix
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
data = ds.auxiliary_data[spair][ipath].data[:,indx1:indx2]
# print(data.shape)
nwin = data.shape[0]
amax = np.zeros(nwin,dtype=np.float32)
if nwin==0 or len(ngood)==1: print('continue! no enough substacks!');continue
tmarks = []
data_normalizd=data
# load cc for each station-pair
for ii in range(nwin):
data[ii] = bandpass(data[ii],freqmin,freqmax,1/dt,corners=4, zerophase=True)
data[ii] = data[ii]-np.mean(data[ii])
amax[ii] = np.max(np.abs(data[ii]))
data_normalizd[ii] = data[ii]/amax[ii]
timestamp[ii] = obspy.UTCDateTime(ttime[ii])
tmarks.append(obspy.UTCDateTime(ttime[ii]).strftime('%Y-%m-%dT%H:%M:%S'))
dstack_mean=np.mean(data,axis=0)
dstack_robust=stacking.robust_stack(data)[0]
# plotting
if nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(5,1,(1,3))
ax.matshow(data_normalizd,cmap='seismic',extent=[-lag0,lag0,nwin,0],aspect='auto')
ax.plot((0,0),(nwin,0),'k-')
ax.set_title('%s.%s.%s %s.%s.%s dist:%5.2fkm' % (net1,sta1,chan1,net2,sta2,chan2,dist))
ax.set_xlabel('time [s]')
ax.set_xticks(t)
ax.set_yticks(np.arange(0,nwin,step=tick_inc))
# ax.set_yticklabels(np.arange(0,nwin,step=tick_inc))
ax.set_yticklabels(tmarks[0:nwin:tick_inc])
ax.set_xlim([-lag,lag])
ax.xaxis.set_ticks_position('bottom')
ax1 = fig.add_subplot(5,1,(4,5))
ax1.set_title('stack at %4.2f-%4.2f Hz'%(freqmin,freqmax))
tstack=np.arange(-lag0,lag0+0.5*dt,dt)
if len(tstack)>len(dstack_mean):tstack=tstack[:-1]
ax1.plot(tstack,dstack_mean,'b-',linewidth=1,label='mean')
ax1.plot(tstack,dstack_robust,'r-',linewidth=1,label='robust')
ax1.set_xlabel('time [s]')
ax1.set_xticks(t)
ax1.set_xlim([-lag,lag])
ylim=ax1.get_ylim()
ax1.plot((0,0),ylim,'k-')
ax1.set_ylim(ylim)
ax1.legend(loc='upper right')
ax1.grid()
# ax2 = fig.add_subplot(414)
# ax2.plot(amax/min(amax),'r-')
# ax2.plot(ngood,'b-')
# ax2.set_xlabel('waveform number')
# ax2.set_xticks(np.arange(0,nwin,step=tick_inc))
# ax2.set_xticklabels(tmarks[0:nwin:tick_inc])
# #for tick in ax[2].get_xticklabels():
# # tick.set_rotation(30)
# ax2.legend(['relative amp','ngood'],loc='upper right')
fig.tight_layout()
# save figure or just show
if save:
if figdir==None:figdir = sfile.split('.')[0]
if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+\
'/{0:s}.{1:s}.{2:s}_{3:s}.{4:s}.{5:s}_{6:s}-{7:s}Hz.png'.format(net1,sta1,\
chan1,net2,\
sta2,chan2,
str(freqmin),str(freqmax))
fig.savefig(outfname, format='png', dpi=400)
print('saved to: '+outfname)
plt.close()
else:
fig.show()
def plot_corrfile(sfile,freqmin,freqmax,lag=None,comp='ZZ',
save=True,figname=None,format='png',figdir=None):
'''
display the 2D matrix of the cross-correlation functions for a certain time-chunck.
PARAMETERS:
--------------------------
sfile: cross-correlation functions outputed by SeisGo workflow
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
USAGE:
--------------------------
plot_corrfile('temp.h5',0.1,1,100,True,'./')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
corrdict=noise.extract_corrdata(sfile,comp=comp)
clist=list(corrdict.keys())
for c in clist:
corr=corrdict[c]
if comp in list(corr.keys()):
corr[comp].plot(freqmin=freqmin,freqmax=freqmax,lag=lag,save=save,figdir=figdir,
figname=figname,format=format)
def plot_corrdata(corr,freqmin=None,freqmax=None,lag=None,save=False,figdir=None,figsize=(10,8)):
'''
display the 2D matrix of the cross-correlation functions for a certain time-chunck.
PARAMETERS:
--------------------------
corr: : class:`~seisgo.types.CorrData`
CorrData object containing the correlation functions and the metadata.
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
USAGE:
--------------------------
plot_corrdata(corr,0.1,1,100,save=True,figdir='./')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
netstachan1 = corr.net[0]+'.'+corr.sta[0]+'.'+corr.loc[0]+'.'+corr.chan[0]
netstachan2 = corr.net[1]+'.'+corr.sta[1]+'.'+corr.loc[1]+'.'+corr.chan[1]
dt,maxlag,dist,ngood,ttime,substack = [corr.dt,corr.lag,corr.dist,corr.ngood,corr.time,corr.substack]
# lags for display
if not lag:lag=maxlag
if lag>maxlag:raise ValueError('lag excceds maxlag!')
lag0=np.min([1.0*lag,maxlag])
# t is the time labels for plotting
if lag>=5:
tstep=int(int(lag)/5)
t1=np.arange(-int(lag),0,step=tstep);t2=np.arange(0,int(lag+0.5*tstep),step=tstep)
t=np.concatenate((t1,t2))
else:
tstep=lag/5
t1=np.arange(-lag,0,step=tstep);t2=np.arange(0,lag+0.5*tstep,step=tstep)
t=np.concatenate((t1,t2))
indx1 = int((maxlag-lag0)/dt);indx2 = indx1+2*int(lag0/dt)+1
# cc matrix
if substack:
data = corr.data[:,indx1:indx2]
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
# print(data.shape)
nwin = data.shape[0]
amax = np.zeros(nwin,dtype=np.float32)
if nwin==0 or len(ngood)==1:
print('continue! no enough trace to plot!')
return
tmarks = []
data_normalizd=data
# load cc for each station-pair
for ii in range(nwin):
if freqmin is not None and freqmax is not None:
data[ii] = bandpass(data[ii],freqmin,freqmax,1/dt,corners=4, zerophase=True)
data[ii] = data[ii]-np.mean(data[ii])
amax[ii] = np.max(np.abs(data[ii]))
data_normalizd[ii] = data[ii]/amax[ii]
timestamp[ii] = obspy.UTCDateTime(ttime[ii])
tmarks.append(obspy.UTCDateTime(ttime[ii]).strftime('%Y-%m-%dT%H:%M:%S'))
dstack_mean=np.mean(data,axis=0)
# dstack_robust=stack.robust_stack(data)[0]
# plotting
if nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(6,1,(1,4))
ax.matshow(data_normalizd,cmap='seismic',extent=[-lag0,lag0,nwin,0],aspect='auto')
ax.plot((0,0),(nwin,0),'k-')
if freqmin is not None and freqmax is not None:
ax.set_title('%s-%s : dist : %5.2f km : %4.2f-%4.2f Hz' % (netstachan1,netstachan2,
dist,freqmin,freqmax))
else:
ax.set_title('%s-%s : dist : %5.2f km : unfiltered' % (netstachan1,netstachan2,dist))
ax.set_xlabel('time [s]')
ax.set_xticks(t)
ax.set_yticks(np.arange(0,nwin,step=tick_inc))
ax.set_yticklabels(tmarks[0:nwin:tick_inc])
ax.set_xlim([-lag,lag])
ax.xaxis.set_ticks_position('bottom')
ax1 = fig.add_subplot(6,1,(5,6))
if freqmin is not None and freqmax is not None:
ax1.set_title('stack at %4.2f-%4.2f Hz'%(freqmin,freqmax))
else:
ax1.set_title('stack: unfiltered')
tstack=np.arange(-lag0,lag0+0.5*dt,dt)
if len(tstack)>len(dstack_mean):tstack=tstack[:-1]
ax1.plot(tstack,dstack_mean,'b-',linewidth=1,label='mean')
# ax1.plot(tstack,dstack_robust,'r-',linewidth=1,label='robust')
ax1.set_xlabel('time [s]')
ax1.set_xticks(t)
ax1.set_xlim([-lag,lag])
ylim=ax1.get_ylim()
ax1.plot((0,0),ylim,'k-')
ax1.set_ylim(ylim)
ax1.legend(loc='upper right')
ax1.grid()
fig.tight_layout()
else: #only one trace available
data = corr.data[indx1:indx2]
# load cc for each station-pair
if freqmin is not None and freqmax is not None:
data = bandpass(data,freqmin,freqmax,1/dt,corners=4, zerophase=True)
data = data-np.mean(data)
amax = np.max(np.abs(data))
data /= amax
timestamp = obspy.UTCDateTime(ttime)
tmarks=obspy.UTCDateTime(ttime).strftime('%Y-%m-%dT%H:%M:%S')
tx=np.arange(-lag0,lag0+0.5*dt,dt)
if len(tx)>len(data):tx=tx[:-1]
plt.figure(figsize=figsize)
ax=plt.gca()
plt.plot(tx,data,'k-',linewidth=1)
if freqmin is not None and freqmax is not None:
plt.title('%s-%s : dist : %5.2f km : %4.2f-%4.2f Hz' % (netstachan1,netstachan2,
dist,freqmin,freqmax))
else:
plt.title('%s-%s : dist : %5.2f km : unfiltered' % (netstachan1,netstachan2,dist))
plt.xlabel('time [s]')
plt.xticks(t)
ylim=ax.get_ylim()
plt.plot((0,0),ylim,'k-')
plt.ylim(ylim)
plt.xlim([-lag,lag])
ax.grid()
# save figure or just show
if save:
if figdir==None:figdir = sfile.split('.')[0]
if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+\
'/{0:s}_{1:s}_{2:s}-{3:s}Hz.png'.format(netstachan1,netstachan2,
str(freqmin),str(freqmax))
plt.savefig(outfname, format='png', dpi=300)
print('saved to: '+outfname)
plt.close()
else:
plt.show()
'''
Inherited and modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_xcorr_substack_spect(sfile,freqmin,freqmax,lag=None,save=True,figdir='./'):
'''
display the amplitude spectrum of the cross-correlation functions for a time-chunck.
PARAMETERS:
-----------------------
sfile: cross-correlation functions outputed by S1
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
USAGE:
-----------------------
plot_xcorr_substack_spect('temp.h5',0.1,1,200,True,'./')
Note: IMPORTANT!!!! this script only works for the cross-correlation with sub-stacks in S1.
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
# extract common variables
spairs = ds.auxiliary_data.list()
path_lists = ds.auxiliary_data[spairs[0]].list()
flag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['substack']
dt = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['dt']
maxlag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
# only works for cross-correlation with substacks generated
if not flag:
raise ValueError('seems no substacks have been done! not suitable for this plotting function')
# lags for display
if not lag:lag=maxlag
if lag>maxlag:raise ValueError('lag excceds maxlag!')
t = np.arange(-int(lag),int(lag)+dt,step=int(2*int(lag)/4))
indx1 = int((maxlag-lag)/dt)
indx2 = indx1+2*int(lag/dt)+1
nfft = int(next_fast_len(indx2-indx1))
freq = scipy.fftpack.fftfreq(nfft,d=dt)[:nfft//2]
for spair in spairs:
ttr = spair.split('_')
net1,sta1 = ttr[0].split('.')
net2,sta2 = ttr[1].split('.')
for ipath in path_lists:
chan1,chan2 = ipath.split('_')
try:
dist = ds.auxiliary_data[spair][ipath].parameters['dist']
ngood= ds.auxiliary_data[spair][ipath].parameters['ngood']
ttime= ds.auxiliary_data[spair][ipath].parameters['time']
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
except Exception:
print('continue! something wrong with %s %s'%(spair,ipath))
continue
# cc matrix
data = ds.auxiliary_data[spair][ipath].data[:,indx1:indx2]
nwin = data.shape[0]
amax = np.zeros(nwin,dtype=np.float32)
spec = np.zeros(shape=(nwin,nfft//2),dtype=np.complex64)
if nwin==0 or len(ngood)==1: print('continue! no enough substacks!');continue
# load cc for each station-pair
for ii in range(nwin):
spec[ii] = scipy.fftpack.fft(data[ii],nfft,axis=0)[:nfft//2]
spec[ii] /= np.max(np.abs(spec[ii]),axis=0)
data[ii] = bandpass(data[ii],freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
amax[ii] = max(data[ii])
data[ii] /= amax[ii]
timestamp[ii] = obspy.UTCDateTime(ttime[ii])
# plotting
if nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
fig,ax = plt.subplots(3,sharex=False)
ax[0].matshow(data,cmap='seismic',extent=[-lag,lag,nwin,0],aspect='auto')
ax[0].set_title('%s.%s.%s %s.%s.%s dist:%5.2f km' % (net1,sta1,chan1,net2,sta2,chan2,dist))
ax[0].set_xlabel('time [s]')
ax[0].set_xticks(t)
ax[0].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[0].set_yticklabels(timestamp[0:-1:tick_inc])
ax[0].xaxis.set_ticks_position('bottom')
ax[1].matshow(np.abs(spec),cmap='seismic',extent=[freq[0],freq[-1],nwin,0],aspect='auto')
ax[1].set_xlabel('freq [Hz]')
ax[1].set_ylabel('amplitudes')
ax[1].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[1].xaxis.set_ticks_position('bottom')
ax[2].plot(amax/min(amax),'r-')
ax[2].plot(ngood,'b-')
ax[2].set_xlabel('waveform number')
#ax[1].set_xticks(np.arange(0,nwin,int(nwin/5)))
ax[2].legend(['relative amp','ngood'],loc='upper right')
fig.tight_layout()
# save figure or just show
if save:
if figdir==None:figdir = sfile.split('.')[0]
                if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+'/{0:s}.{1:s}.{2:s}_{3:s}.{4:s}.{5:s}.pdf'.format(net1,sta1,chan1,net2,sta2,chan2)
fig.savefig(outfname, format='pdf', dpi=400)
plt.close()
else:
fig.show()
#############################################################################
###############PLOTTING THE POST-STACKING XCORR FUNCTIONS AS OUTPUT OF S2 STEP IN NOISEPY ##########################
#############################################################################
'''
Inherited and modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_substack_all(sfile,freqmin,freqmax,comp,lag=None,save=False,figdir=None):
'''
display the 2D matrix of the cross-correlation functions stacked for all time windows.
PARAMETERS:
---------------------
sfile: cross-correlation functions outputed by S2
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
comp: cross component of the targeted cc functions
USAGE:
----------------------
plot_substack_all('temp.h5',0.1,1,'ZZ',50,True,'./')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
paths = comp
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
# extract common variables
dtype_lists = ds.auxiliary_data.list()
dt = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dt']
dist = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dist']
maxlag = ds.auxiliary_data[dtype_lists[0]][paths].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
if len(dtype_lists)==1:
raise ValueError('Abort! seems no substacks have been done')
# lags for display
if not lag:lag=maxlag
if lag>maxlag:raise ValueError('lag excceds maxlag!')
t = np.arange(-int(lag),int(lag)+dt,step=int(2*int(lag)/4))
indx1 = int((maxlag-lag)/dt)
indx2 = indx1+2*int(lag/dt)+1
# other parameters to keep
nwin = len(dtype_lists)-1
data = np.zeros(shape=(nwin,indx2-indx1),dtype=np.float32)
ngood= np.zeros(nwin,dtype=np.int16)
    ttime= np.zeros(nwin,dtype=int)
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
amax = np.zeros(nwin,dtype=np.float32)
for ii,itype in enumerate(dtype_lists[2:]):
        timestamp[ii] = obspy.UTCDateTime(float(itype[1:]))
try:
ngood[ii] = ds.auxiliary_data[itype][paths].parameters['ngood']
ttime[ii] = ds.auxiliary_data[itype][paths].parameters['time']
#timestamp[ii] = obspy.UTCDateTime(ttime[ii])
# cc matrix
data[ii] = ds.auxiliary_data[itype][paths].data[indx1:indx2]
data[ii] = bandpass(data[ii],freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
amax[ii] = np.max(data[ii])
data[ii] /= amax[ii]
except Exception as e:
print(e);continue
if len(ngood)==1:
raise ValueError('seems no substacks have been done! not suitable for this plotting function')
# plotting
if nwin>100:
tick_inc = int(nwin/10)
elif nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
fig,ax = plt.subplots(2,sharex=False)
ax[0].matshow(data,cmap='seismic',extent=[-lag,lag,nwin,0],aspect='auto')
ax[0].set_title('%s dist:%5.2f km filtered at %4.2f-%4.2fHz' % (sfile.split('/')[-1],dist,freqmin,freqmax))
ax[0].set_xlabel('time [s]')
    ax[0].set_ylabel('waveforms')
ax[0].set_xticks(t)
ax[0].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[0].set_yticklabels(timestamp[0:nwin:tick_inc])
ax[0].xaxis.set_ticks_position('bottom')
ax[1].plot(amax/max(amax),'r-')
ax[1].plot(ngood,'b-')
ax[1].set_xlabel('waveform number')
ax[1].set_xticks(np.arange(0,nwin,nwin//5))
ax[1].legend(['relative amp','ngood'],loc='upper right')
# save figure or just show
if save:
if figdir==None:figdir = sfile.split('.')[0]
        if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+'/{0:s}_{1:4.2f}_{2:4.2f}Hz.pdf'.format(sfile.split('/')[-1],freqmin,freqmax)
fig.savefig(outfname, format='pdf', dpi=400)
plt.close()
else:
fig.show()
'''
Inherited and modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_substack_all_spect(sfile,freqmin,freqmax,comp,lag=None,save=False,figdir=None):
'''
display the amplitude spectrum of the cross-correlation functions stacked for all time windows.
PARAMETERS:
-----------------------
sfile: cross-correlation functions outputed by S2
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
comp: cross component of the targeted cc functions
USAGE:
-----------------------
plot_substack_all('temp.h5',0.1,1,'ZZ',50,True,'./')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
paths = comp
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
# extract common variables
dtype_lists = ds.auxiliary_data.list()
dt = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dt']
dist = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dist']
maxlag = ds.auxiliary_data[dtype_lists[0]][paths].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
if len(dtype_lists)==1:
raise ValueError('Abort! seems no substacks have been done')
# lags for display
if not lag:lag=maxlag
if lag>maxlag:raise ValueError('lag excceds maxlag!')
t = np.arange(-int(lag),int(lag)+dt,step=int(2*int(lag)/4))
indx1 = int((maxlag-lag)/dt)
indx2 = indx1+2*int(lag/dt)+1
nfft = int(next_fast_len(indx2-indx1))
freq = scipy.fftpack.fftfreq(nfft,d=dt)[:nfft//2]
# other parameters to keep
nwin = len(dtype_lists)-1
data = np.zeros(shape=(nwin,indx2-indx1),dtype=np.float32)
spec = np.zeros(shape=(nwin,nfft//2),dtype=np.complex64)
ngood= np.zeros(nwin,dtype=np.int16)
    ttime= np.zeros(nwin,dtype=int)
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
amax = np.zeros(nwin,dtype=np.float32)
for ii,itype in enumerate(dtype_lists[1:]):
        timestamp[ii] = obspy.UTCDateTime(float(itype[1:]))
try:
ngood[ii] = ds.auxiliary_data[itype][paths].parameters['ngood']
ttime[ii] = ds.auxiliary_data[itype][paths].parameters['time']
#timestamp[ii] = obspy.UTCDateTime(ttime[ii])
# cc matrix
tdata = ds.auxiliary_data[itype][paths].data[indx1:indx2]
spec[ii] = scipy.fftpack.fft(tdata,nfft,axis=0)[:nfft//2]
spec[ii] /= np.max(np.abs(spec[ii]))
data[ii] = bandpass(tdata,freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
amax[ii] = np.max(data[ii])
data[ii] /= amax[ii]
except Exception as e:
print(e);continue
if len(ngood)==1:
raise ValueError('seems no substacks have been done! not suitable for this plotting function')
# plotting
tick_inc = 50
fig,ax = plt.subplots(3,sharex=False)
ax[0].matshow(data,cmap='seismic',extent=[-lag,lag,nwin,0],aspect='auto')
ax[0].set_title('%s dist:%5.2f km' % (sfile.split('/')[-1],dist))
ax[0].set_xlabel('time [s]')
    ax[0].set_ylabel('waveforms')
ax[0].set_xticks(t)
ax[0].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[0].set_yticklabels(timestamp[0:nwin:tick_inc])
ax[0].xaxis.set_ticks_position('bottom')
ax[1].matshow(np.abs(spec),cmap='seismic',extent=[freq[0],freq[-1],nwin,0],aspect='auto')
ax[1].set_xlabel('freq [Hz]')
ax[1].set_ylabel('amplitudes')
ax[1].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[1].set_yticklabels(timestamp[0:nwin:tick_inc])
ax[1].xaxis.set_ticks_position('bottom')
ax[2].plot(amax/max(amax),'r-')
ax[2].plot(ngood,'b-')
ax[2].set_xlabel('waveform number')
ax[2].set_xticks(np.arange(0,nwin,nwin//15))
ax[2].legend(['relative amp','ngood'],loc='upper right')
# save figure or just show
if save:
if figdir==None:figdir = sfile.split('.')[0]
        if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+'/{0:s}.pdf'.format(sfile.split('/')[-1])
fig.savefig(outfname, format='pdf', dpi=400)
plt.close()
else:
fig.show()
'''
Modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_xcorr_moveout_heatmap(sfiles,sta,dtype,freq,comp,dist_inc,lag=None,save=False,\
figsize=None,format='png',figdir=None):
'''
display the moveout (2D matrix) of the cross-correlation functions stacked for all time chuncks.
PARAMETERS:
---------------------
sfile: cross-correlation functions outputed by S2
sta: station name as the virtual source.
dtype: datatype either 'Allstack_pws' or 'Allstack_linear'
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
comp: cross component
dist_inc: distance bins to stack over
lag: lag times for displaying
save: set True to save the figures (in pdf format)
figdir: diresied directory to save the figure (if not provided, save to default dir)
USAGE:
----------------------
plot_xcorr_moveout_heatmap('temp.h5','sta','Allstack_pws',0.1,0.2,1,'ZZ',200,True,'./temp')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
if not isinstance(freq[0],list):freq=[freq]
freq=np.array(freq)
figlabels=['(a)','(b)','(c)','(d)','(e)','(f)','(g)','(h)','(i)']
if freq.shape[0]>9:
raise ValueError('freq includes more than 9 (maximum allowed for now) elements!')
elif freq.shape[0]==9:
subplot=[3,3]
figsize0=[14,7.5]
elif freq.shape[0] >=7 and freq.shape[0] <=8:
subplot=[2,4]
figsize0=[18,10]
elif freq.shape[0] >=5 and freq.shape[0] <=6:
subplot=[2,3]
figsize0=[14,7.5]
elif freq.shape[0] ==4:
subplot=[2,2]
figsize0=[10,6]
else:
subplot=[1,freq.shape[0]]
if freq.shape[0]==3:
figsize0=[13,3]
elif freq.shape[0]==2:
figsize0=[8,3]
else:
figsize0=[4,3]
if figsize is None:figsize=figsize0
path = comp
receiver = sta+'.h5'
stack_method = dtype.split('_')[-1]
# extract common variables
try:
ds = pyasdf.ASDFDataSet(sfiles[0],mpi=False,mode='r')
dt = ds.auxiliary_data[dtype][path].parameters['dt']
maxlag= ds.auxiliary_data[dtype][path].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfiles[0]);sys.exit()
# lags for display
if lag is None:lag=maxlag
if lag>maxlag:raise ValueError('lag excceds maxlag!')
t = np.arange(-int(lag),int(lag)+dt,step=(int(2*int(lag)/4)))
indx1 = int((maxlag-lag)/dt)
indx2 = indx1+2*int(lag/dt)+1
# cc matrix
nwin = len(sfiles)
    data0 = np.zeros(shape=(nwin,indx2-indx1),dtype=np.float32)
import numpy as np
import pandas as pd
def structConfMat(confmat, index=0, multiple=False):
"""
Creates a pandas dataframe from the confusion matrix. It distinguishes
between binary and multi-class classification.
Parameters
----------
confmat : numpy.ndarray
Array with n rows, each of one being a flattened confusion matrix.
index : INT, optional
Integer for index of the dataframe. The default is 0.
multiple : BOOL, optional
If True, returns metrics per CV fold. If False, returns mean and std
of the metric over all folds (in complex format).
Returns
-------
performance : pd.DataFrame
Dataframe with all classification performance metrics.
Use "{0.real:.3} [{0.imag:.2}]".format to display float_format in latex
Example for latex tables:
print(structConfMat(confmat,multiple=False)
.to_latex(float_format="{0.real:.3} [{0.imag:.2}]".format))
    Note: for converting multiple performance to average/std use
(performance.mean() + 1j*performance.std()).to_frame().T
"""
intdim = int(np.sqrt(confmat.shape[1]))
conf_n = confmat.reshape((len(confmat), intdim, intdim))
corrects = conf_n.transpose(2,1,0).reshape((-1,len(conf_n)))[::(intdim+1)]
corrects = corrects.sum(axis=0)
n_folds = conf_n.sum(axis=1).sum(axis=1)
cr = corrects/n_folds
aux_n = conf_n[:,0][:,0]/conf_n[:,0].sum(axis=1)
for ix in range(intdim-1):
aux_n = np.c_[aux_n, conf_n[:,ix+1][:,ix+1]/conf_n[:,ix+1].sum(axis=1)]
b_acc = np.nanmean(aux_n, axis=1)
performance = pd.DataFrame({'CorrectRate': cr, 'ErrorRate': 1-cr,
'balAcc': b_acc},
index=index+np.arange(confmat.shape[0]))
for ix in range(aux_n.shape[1]):
auxperf = pd.DataFrame({f'Class_{ix}': aux_n[:,ix]},
index=index+np.arange(confmat.shape[0]))
performance = pd.concat((performance, auxperf),axis=1)
if intdim==2:
columns = performance.columns.tolist()
columns[columns.index('Class_0')]='Sensitivity'
columns[columns.index('Class_1')]='Specificity'
performance.columns = columns
prec = aux_n[:,1]/(aux_n[:,1]+1-aux_n[:,0])
f1 = 2*prec*aux_n[:,1]/(prec+aux_n[:,1])
performance['Precision'] = prec
performance['F1'] = f1
if multiple==False:
performance = (performance.mean(skipna=True)
+ 1j*performance.std(skipna=True)).to_frame().T
return performance
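def demo_structConfMat():
    """
    Usage sketch (illustrative only): summarise two CV folds of a binary
    classifier. Each row of `confmat` is one fold's 2x2 confusion matrix
    flattened row-major; the counts below are made up.
    """
    confmat = np.array([[40, 10, 5, 45],
                        [42, 8, 7, 43]])
    per_fold = structConfMat(confmat, multiple=True)    # one row of metrics per fold
    summary = structConfMat(confmat, multiple=False)    # mean + 1j*std per metric
    return per_fold, summary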
def regressionMetrics(predicted, originals, index=0):
"""
Creates regression performance metrics.
Parameters
----------
predicted : list of arrays (one array per cv iteration)
Predicted outcome of the regression model.
originals : list of arrays (one array per cv iteration)
Original outcome (true) of the regression model.
index : INT, optional
Integer for index of the dataframe. The default is 0.
Returns
-------
performance : pd.DataFrame
Dataframe with all the performance metrics.
"""
from sklearn.metrics import r2_score
MSE = []
MAE = []
R2 = []
RMSE = []
for ix in range(len(predicted)):
MSE.append(np.mean((originals[ix] - predicted[ix])**2))
MAE.append(np.mean(abs(originals[ix] - predicted[ix])))
RMSE.append(np.sqrt(MSE[ix]))
R2.append(r2_score(originals[ix], predicted[ix]))
    performance = pd.DataFrame({'MAE': np.mean(MAE), 'MSE': np.mean(MSE),
                                'RMSE': np.mean(RMSE), 'R2': np.mean(R2)},
                               index=[index])
    return performance
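def _example_regressionMetrics():
    """Minimal usage sketch with synthetic data: two CV folds of noisy predictions
    evaluated against their true values."""
    rng = np.random.default_rng(0)
    originals = [rng.normal(size=20) for _ in range(2)]
    predicted = [y + rng.normal(scale=0.1, size=20) for y in originals]
    return regressionMetrics(predicted, originals)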
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import platform
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.amp as amp
import megengine.config as config
import megengine.core.ops.builtin as builtin
import megengine.core.tensor.dtype as dtype
import megengine.functional as F
import megengine.jit as jit
from megengine import Parameter, Tensor, is_cuda_available, tensor
from megengine.core._trace_option import use_symbolic_shape
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.device import get_device_count
from megengine.module import LayerNorm
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.bool_)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.bool_)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where, test_trace=False)
maskv2 = np.array([1, 1, 1], dtype=np.bool_)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.bool_)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where, test_trace=False)
def test_dropout():
from megengine.autodiff import GradManager
from megengine.core._imperative_rt.ops import set_global_rng_seed
def test_dropout_with_shape(shape, rate):
data = tensor(np.ones(shape, dtype=np.float32))
gm = GradManager().attach([data])
with gm:
out = F.nn.dropout(data, rate, training=True)
gm.backward(out, tensor(np.ones(shape, dtype=np.float32)))
assert not out.numpy().all()
np.testing.assert_allclose(out.numpy(), data.grad.numpy(), 1e-7, 1e-7)
def test_multiple_dropout(shape, rate):
data = tensor(np.ones(shape, dtype=np.float32))
gm = GradManager().attach([data])
with gm:
out1 = F.nn.dropout(data, rate, training=True)
out2 = F.nn.dropout(out1, rate, training=True)
out3 = F.nn.dropout(out2, rate, training=True)
gm.backward(out3, tensor(np.ones(shape, dtype=np.float32)))
np.testing.assert_allclose(out3.numpy(), data.grad.numpy(), 1e-7, 1e-7)
def test_dropout_seed(shape, rate):
data = tensor(np.random.randn(*shape), dtype="float32")
set_global_rng_seed(111)
out1 = F.nn.dropout(data, rate, training=True)
out2 = F.nn.dropout(data, rate, training=True)
assert not (out1.numpy() == out2.numpy()).all()
set_global_rng_seed(111)
out3 = F.nn.dropout(data, rate, training=True)
assert (out1.numpy() == out3.numpy()).all()
set_global_rng_seed(222)
out4 = F.nn.dropout(data, rate, training=True)
assert not (out1.numpy() == out4.numpy()).all()
test_dropout_with_shape([13, 17, 63, 21], 0.4)
test_dropout_with_shape([16, 32, 64], 0.3)
test_multiple_dropout([1024], 0.2)
test_dropout_seed([16, 32], 0.2)
def test_matinv():
shape1 = (5, 5)
shape2 = (3, 9, 9)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
# make matrix diagonally dominant for numerical stability
data1 += (np.eye(shape1[0]) * shape1[0]).astype("float32")
data2 += np.broadcast_to((np.eye(shape2[1]) * shape2[1]).astype("float32"), shape2)
cases = [
{"input": data1},
{"input": data2},
]
opr_test(
cases,
F.matinv,
compare_fn=lambda x, y: np.testing.assert_allclose(x.numpy(), y, rtol=1e-4),
ref_fn=np.linalg.inv,
)
def test_matmul():
shape1 = 3
shape2 = 3
shape3 = (3, 5)
shape4 = (5, 6)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
data4 = np.random.random(shape4).astype("float32")
cases = [
{"input": [data1, data2]},
{"input": [data2, data3]},
{"input": [data3, data4]},
]
opr_test(cases, F.matmul, ref_fn=np.matmul)
batch_size = 10
shape1 = (2,)
shape2 = (batch_size, 2, 3)
shape3 = (batch_size, 3, 4)
shape4 = (batch_size, 10, 4, 2)
shape5 = (batch_size, 10, 2, 4)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
data4 = np.random.random(shape4).astype("float32")
data5 = np.random.random(shape5).astype("float32")
cases = [
{"input": [data1, data2]},
{"input": [data2, data3]},
{"input": [data3, data4]},
{"input": [data4, data5]},
]
opr_test(cases, F.matmul, ref_fn=np.matmul)
opr_test(
[{"input": [data1, data4]}],
F.matmul,
ref_fn=lambda x, y: np.matmul(x, y.transpose(0, 1, 3, 2)),
transpose_b=True,
)
opr_test(
[{"input": [data3, data2]}],
F.matmul,
ref_fn=lambda x, y: np.matmul(x.transpose(0, 2, 1), y.transpose(0, 2, 1)),
transpose_a=True,
transpose_b=True,
)
@pytest.mark.parametrize(
"shape_a, shape_b", [((0,), (0,)), ((10, 0), (0, 10)), ((3, 10, 0), (3, 0, 10)),],
)
@pytest.mark.parametrize("is_symbolic", [None, True, False])
def test_matmul_empty_tensor(shape_a, shape_b, is_symbolic):
def func(a, b):
return F.matmul(a, b)
if is_symbolic is not None:
func = jit.trace(symbolic=is_symbolic)(func)
a = tensor(np.random.randn(*shape_a))
b = tensor(np.random.randn(*shape_b))
for _ in range(3):
out = func(a, b)
assert np.all(out.numpy() == 0)
if is_symbolic is None:
break
def test_interpolate():
def linear_interpolate():
inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))
out = F.vision.interpolate(inp, scale_factor=2.0, mode="linear")
out2 = F.vision.interpolate(inp, 4, mode="linear")
np.testing.assert_allclose(
out.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
)
np.testing.assert_allclose(
out2.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
)
def many_batch_interpolate():
inp = tensor(np.arange(1, 9, dtype=np.float32).reshape(2, 1, 2, 2))
out = F.vision.interpolate(inp, [4, 4])
out2 = F.vision.interpolate(inp, scale_factor=2.0)
np.testing.assert_allclose(out.numpy(), out2.numpy())
def assign_corner_interpolate():
inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
out = F.vision.interpolate(inp, [4, 4], align_corners=True)
out2 = F.vision.interpolate(inp, scale_factor=2.0, align_corners=True)
np.testing.assert_allclose(out.numpy(), out2.numpy())
def error_shape_linear_interpolate():
inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
with pytest.raises(ValueError):
F.vision.interpolate(inp, scale_factor=2.0, mode="linear")
def inappropriate_scale_linear_interpolate():
inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))
with pytest.raises(ValueError):
F.vision.interpolate(inp, scale_factor=[2.0, 3.0], mode="linear")
linear_interpolate()
many_batch_interpolate()
assign_corner_interpolate()
error_shape_linear_interpolate()
inappropriate_scale_linear_interpolate()
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
def _gen_roi_inp():
inp_feat = np.random.randn(2, 32, 256, 256)
rois = np.zeros((4, 5))
rois[:, 0] = [0, 0, 1, 1]
rois[:, 1:3] = np.random.rand(4, 2) * 100
rois[:, 3:] = np.random.rand(4, 2) * 100 + 150
inp_feat = tensor(inp_feat)
rois = tensor(rois)
return inp_feat, rois
def test_roi_align():
inp_feat, rois = _gen_roi_inp()
grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))
output_shape = (7, 7)
out_feat = F.vision.roi_align(
inp_feat,
rois,
output_shape=output_shape,
mode="average",
spatial_scale=1.0 / 4,
sample_points=2,
aligned=True,
)
assert make_shape_tuple(out_feat.shape) == (
rois.shape[0],
inp_feat.shape[1],
*output_shape,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)
def _gen_correlation(random=True, constant=1, image_shape=(2, 1, 160, 160)):
if random:
inp_feat1 = np.random.randn(
image_shape[0], image_shape[1], image_shape[2], image_shape[3]
)
inp_feat2 = np.random.randn(
image_shape[0], image_shape[1], image_shape[2], image_shape[3]
)
else:
inp_feat1 = np.ones(image_shape) * constant
inp_feat2 = np.ones(image_shape) * constant
return tensor(inp_feat1), tensor(inp_feat2)
def test_correlation():
##test case 0 check the grad shape
data1, data2 = _gen_correlation()
grad = Grad().wrt(data1, callback=_save_to(data1))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=5,
max_displacement=4,
stride1=2,
stride2=2,
pad_size=2,
is_multiply=True,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(data1.grad.shape) == make_shape_tuple(data1.shape)
##test case 1 from https://github.com/NVIDIA/flownet2-pytorch/issues/194
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=True,
)
assert abs(out_feat.sum() - 1) < 1e-9
    ##test case 2 check same image subtraction
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=False,
)
assert out_feat.sum() < 1e-9
    ##test case 3 check same image subtraction
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=False,
)
assert out_feat.sum() < 1e-9
##test case 4 check correlation
data1, _ = _gen_correlation(
random=False, image_shape=(1, 1, 220, 220), constant=2.0
)
_, data2 = _gen_correlation(
random=False, image_shape=(1, 1, 220, 220), constant=1.0
)
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=2,
stride1=1,
stride2=2,
pad_size=0,
is_multiply=False,
)
assert abs(out_feat.mean() - 1) < 1e-9
def test_roi_pooling():
inp_feat, rois = _gen_roi_inp()
grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))
output_shape = (7, 7)
out_feat = F.vision.roi_pooling(
inp_feat, rois, output_shape=output_shape, mode="max", scale=1.0 / 4,
)
assert make_shape_tuple(out_feat.shape) == (
rois.shape[0],
inp_feat.shape[1],
*output_shape,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)
def test_adaptive_avg_pool2d():
inp = tensor(np.arange(0, 16, dtype=np.float32).reshape(1, 1, 4, 4))
oshp = (2, 2)
grad = Grad().wrt(inp, callback=_save_to(inp))
outp = F.adaptive_avg_pool2d(inp, oshp,)
assert make_shape_tuple(outp.shape) == (inp.shape[0], inp.shape[1], *oshp,)
np.testing.assert_equal(
outp.numpy(), np.array([[[[2.5, 4.5], [10.5, 12.5]]]], dtype=np.float32)
)
grad(outp, tensor(F.ones_like(outp)))
assert make_shape_tuple(inp.grad.shape) == make_shape_tuple(inp.shape)
np.testing.assert_equal(
inp.grad.numpy(),
np.array(
[
[
[
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
]
]
],
dtype=np.float32,
),
)
def test_adaptive_max_pool2d():
inp = tensor(np.arange(0, 16, dtype=np.float32).reshape(1, 1, 4, 4))
oshp = (2, 2)
grad = Grad().wrt(inp, callback=_save_to(inp))
outp = F.adaptive_max_pool2d(inp, oshp,)
assert make_shape_tuple(outp.shape) == (inp.shape[0], inp.shape[1], *oshp,)
np.testing.assert_equal(
outp.numpy(), np.array([[[[5, 7], [13, 15]]]], dtype=np.float32)
)
grad(outp, tensor(F.ones_like(outp)))
assert make_shape_tuple(inp.grad.shape) == make_shape_tuple(inp.shape)
np.testing.assert_equal(
inp.grad.numpy(),
np.array(
[
[
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0],
]
]
],
dtype=np.float32,
),
)
def test_one_hot():
def onehot_low_dimension():
inp = tensor(np.arange(1, 4, dtype=np.int32))
out = F.one_hot(inp, num_classes=4)
np.testing.assert_allclose(
out.numpy(), np.eye(4, dtype=np.int32)[np.arange(1, 4, dtype=np.int32)]
)
def onehot_high_dimension():
arr = np.array(
[[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]],
dtype=np.int32,
)
inp = tensor(arr)
out = F.one_hot(inp, 10)
np.testing.assert_allclose(out.numpy(), np.eye(10, dtype=np.int32)[arr])
onehot_low_dimension()
onehot_high_dimension()
def test_interpolate_fastpath():
# check shape
test_cases = [
[(1, 1, 10, 10), (5, 5)],
[(1, 3, 10, 10), (20, 20)],
[(10, 1, 10, 10), (1, 1)],
# [(10, 10, 1, 1), (10, 10)], # FIXME, it causes random CI failure
]
for inp_shape, target_shape in test_cases:
x = tensor(np.random.randn(*inp_shape), dtype=np.float32)
out = F.vision.interpolate(x, target_shape, mode="bilinear")
assert out.shape[0] == x.shape[0] and out.shape[1] == x.shape[1]
assert out.shape[2] == target_shape[0] and out.shape[3] == target_shape[1]
# check value
x = tensor(np.ones((3, 3, 10, 10)), dtype=np.float32)
out = F.vision.interpolate(x, (15, 5), mode="bilinear")
np.testing.assert_equal(out.numpy(), np.ones((3, 3, 15, 5)).astype(np.float32))
np_x = np.arange(32)
x = tensor(np_x).astype(np.float32).reshape(1, 1, 32, 1)
out = F.vision.interpolate(x, (1, 1), mode="bilinear")
np.testing.assert_equal(out.item(), np_x.mean())
@pytest.mark.parametrize("dt", [np.float32, np.int8, np.uint8, np.float16])
def test_warp_perspective(dt):
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=dt).reshape(inp_shape))
M_shape = (1, 3, 3)
    # M defines a translation: dst(1, 1, h, w) = src(1, 1, h+1, w+1)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
outp = F.vision.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(outp.numpy(), np.array([[[[5, 6], [9, 10]]]], dtype=dt))
@pytest.mark.parametrize("dt", [np.float32, np.int8, np.uint8, np.float16])
def test_warp_perspective_mat_idx(dt):
inp_shape = (2, 1, 4, 4)
x = tensor(np.arange(32, dtype=dt).reshape(inp_shape))
M_shape = (1, 3, 3)
    # M defines a translation: dst(1, 1, h, w) = src(1, 1, h+1, w+1)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
M = F.concat([M,] * 4, 0)
outp = F.vision.warp_perspective(x, M, (2, 2), mat_idx=[0, 1, 1, 0])
np.testing.assert_equal(
outp.numpy(),
np.array(
[
[[[5, 6], [9, 10]]],
[[[21, 22], [25, 26]]],
[[[21, 22], [25, 26]]],
[[[5, 6], [9, 10]]],
],
dtype=dt,
),
)
def test_warp_affine():
inp_shape = (1, 3, 3, 3)
x = tensor(np.arange(27, dtype=np.float32).reshape(inp_shape))
weightv = [[[1.26666667, 0.6, -83.33333333], [-0.33333333, 1, 66.66666667]]]
outp = F.vision.warp_affine(x, tensor(weightv), (2, 2), border_mode="wrap")
res = np.array(
[
[
[[7.875, 8.875, 9.875], [8.90625, 9.90625, 10.90625]],
[[18.75, 19.75, 20.75], [14.90625, 15.90625, 16.90625]],
]
],
dtype=np.float32,
)
if not is_cuda_available():
np.testing.assert_almost_equal(outp.numpy(), res, 5)
def test_remap():
inp_shape = (1, 1, 4, 4)
inp = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
map_xy_shape = (1, 2, 2, 2)
map_xy = tensor(
np.array(
[[[1.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]], dtype=np.float32
).reshape(map_xy_shape)
)
outp = F.vision.remap(inp, map_xy)
np.testing.assert_equal(
outp.numpy(), np.array([[[[1.0, 4.0], [4.0, 4.0]]]], dtype=np.float32)
)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
np.testing.assert_allclose(x.numpy(), y, atol=5e-4)
np.random.seed(123)
data1 = np.random.uniform(size=data1_shape).astype(np.float32)
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = np.random.uniform(size=data2_shape).astype(np.float32)
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.nn.binary_cross_entropy, compare_fn=compare_fn)
cases = [
{"input": [sigmoid(data1), label1], "output": expect1,},
{"input": [sigmoid(data2), label2], "output": expect2,},
]
opr_test(
cases,
partial(F.nn.binary_cross_entropy, with_logits=False),
compare_fn=compare_fn,
)
def test_hinge_loss():
np.random.seed(123)
# case with L1 norm
cases = []
for shape in [(2, 2), (2, 3)]:
data = np.random.uniform(size=shape).astype(np.float32)
label = 2 * np.random.randint(0, 1, size=shape).astype(np.float32) - 1
        expect = np.clip(1 - data * label, 0, np.inf).sum(axis=1).mean()
cases.append({"input": [data, label], "output": expect})
opr_test(cases, F.nn.hinge_loss)
# cases with L2 norm
cases = []
for shape in [(2, 2), (2, 3)]:
data = np.random.uniform(size=shape).astype(np.float32)
label = 2 * np.random.randint(0, 1, size=shape).astype(np.float32) - 1
        expect = ((np.clip(1 - data * label, 0, np.inf) ** 2).sum(axis=1)).mean()
cases.append({"input": [data, label], "output": expect})
def hinge_loss_with_l2_norm(pred, label):
return F.nn.hinge_loss(pred, label, "L2")
opr_test(cases, hinge_loss_with_l2_norm)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_nms(is_symbolic):
def fn(inp, scores):
return F.vision.nms(
inp,
scores=scores,
iou_thresh=0.5,
max_output=None if is_symbolic is None else 4,
)
if is_symbolic is not None:
fn = jit.trace(symbolic=is_symbolic)(fn)
x = np.array(
[
[0, 0, 100, 100],
[10, 10, 100, 100],
[50, 50, 100, 100],
[100, 100, 150, 150],
],
dtype=np.float32,
)
inp = tensor(x)
scores = tensor([0.5, 0.8, 0.9, 0.6], dtype=np.float32)
for _ in range(3):
result = fn(inp, scores=scores)
np.testing.assert_equal(result.numpy(), np.array([2, 1, 3], dtype=np.int32))
x = np.array([], dtype=np.float32,).reshape(0, 4)
inp = tensor(x)
scores = tensor([], dtype=np.float32)
for _ in range(3):
result = fn(inp, scores=scores)
np.testing.assert_equal(result.numpy(), np.array([], dtype=np.int32))
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
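            # NCHW -> NCHW4: split the channel axis into blocks of 4 and move each block to the last axis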
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(get_device_count("gpu") > 0, reason="no int8 algorithm on cuda")
def test_batch_conv_bias():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N, IC, OC, IH, IW, KH, KW, PH, PW, SH, SW, has_bias=True,
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(N, OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_batch_conv_bias(inp, w, b):
b = b if has_bias else Parameter(np.zeros_like(b.numpy()))
result = F.quantized.batch_conv_bias_activation(
inp, w, b, stride=(SH, SW), padding=(PH, PW), dtype=out_dtype,
)
return result.astype("float32")
expected = F.conv2d(inp_fp32, w_fp32[0], b_fp32 if has_bias else None)[0]
expected = expected.astype(out_dtype).astype("float32")
expected = F.flatten(expected)
result = run_batch_conv_bias(inp_int8, w_int8, b_int32)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 5, 5, 3, 3, 0, 0, 1, 1, True)
def test_conv2d_autocast():
"""check amp's result is equal to manually converted result"""
amp.enabled = True
inp = tensor(np.random.randn(1, 3, 224, 224), dtype=np.float32)
weight = tensor(np.random.randn(64, 3, 7, 7), dtype=np.float32)
out = F.conv2d(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)
amp.enabled = False
expected = F.conv2d(
inp.astype("float16"),
weight.astype("float16"),
None,
(2, 2),
(3, 3),
(1, 1),
1,
compute_mode="float32",
)
assert out.dtype == np.float16
assert expected.dtype == np.float16
np.testing.assert_allclose(out.numpy(), expected.numpy())
def test_conv2d_zero_stride_numpy_array():
inp = np.random.randn(3, 224, 224).astype(np.float32)
inp = inp[np.newaxis, :]
inp = tensor(inp, dtype=np.float32)
weight = tensor(np.random.randn(16, 3, 3, 3), dtype=np.float32)
out = F.conv2d(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)
def test_conv3d_zero_stride_numpy_array():
inp = np.random.randn(3, 224, 224, 224).astype(np.float32)
inp = inp[np.newaxis, :]
inp = tensor(inp, dtype=np.float32)
weight = tensor(np.random.randn(16, 3, 3, 3, 3), dtype=np.float32)
out = F.conv3d(inp, weight, None, (2, 2, 2), (3, 3, 3), (1, 1, 1), 1)
out.numpy()
def test_conv1d():
inp = tensor(np.ones((2, 2, 4), dtype=np.float32))
weight = tensor(np.ones((3, 2, 2), dtype=np.float32))
out = F.conv1d(inp, weight, None, 2, 0, 1, 1)
np.testing.assert_equal(
out.numpy(),
np.array(
[[[4, 4], [4, 4], [4, 4]], [[4, 4], [4, 4], [4, 4]]], dtype=np.float32
),
)
def test_batchnorm2d_autocast():
"""check amp's result is equal to manually converted result"""
amp.enabled = True
tshape = (1, 3, 224, 224)
pshape = (1, 3, 1, 1)
inp = tensor(np.random.randn(*tshape), dtype=np.float32)
weight = tensor(np.ones(pshape, dtype=np.float32))
bias = tensor(np.zeros(pshape, dtype=np.float32))
out = F.batch_norm(inp, weight=weight, bias=bias, training=True, inplace=False)
amp.enabled = False
expected = F.batch_norm(
inp.astype("float16"),
weight=weight,
bias=bias,
training=True,
inplace=False,
compute_mode="float32",
)
assert out.dtype == np.float16
assert expected.dtype == np.float16
np.testing.assert_allclose(out.numpy(), expected.numpy())
def test_conv3d():
inp = tensor(np.ones((2, 2, 4, 4, 4), dtype=np.float32))
weight = tensor(np.ones((3, 2, 2, 2, 2), dtype=np.float32))
out = F.conv3d(inp, weight, None, 2, 0, 1, 1)
np.testing.assert_equal(
out.numpy(), np.ones((2, 3, 2, 2, 2), dtype=np.float32) * 16
)
def test_condtake():
x = np.array([[1, 2, 3], [4, 5, 6]])
y = np.array([[True, False, True], [False, True, True]])
xx = tensor(x)
yy = tensor(y)
val, idx = F.cond_take(yy, xx)
np.testing.assert_equal(val.numpy(), x[y])
np.testing.assert_equal(idx.numpy(), np.where(y.reshape(-1))[0])
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_condtake_symbolic(is_symbolic):
shapes = [
(3, 3, 3),
(0,),
(3, 0, 3),
]
def fn(mask, data):
return F.cond_take(mask, data)
if is_symbolic is not None:
fn = jit.trace(symbolic=is_symbolic)(fn)
for shp in shapes:
x_np = np.random.randn(*shp).astype("float32")
mask_np = x_np > 0
x = tensor(x_np)
mask = tensor(mask_np)
ref_out = x_np[mask_np]
ref_idx = mask_np.flatten().nonzero()[0]
for i in range(3):
out, idx = fn(mask, x)
np.testing.assert_equal(out.numpy(), ref_out)
np.testing.assert_equal(idx.numpy(), ref_idx)
if is_symbolic is None:
break
def test_condtake_is_same():
op1 = builtin.CondTake()
op2 = builtin.CondTake()
assert op1 == op2
def test_nms_is_same():
op1 = builtin.NMSKeep(0.7, 100)
op2 = builtin.NMSKeep(0.7, 100)
op3 = builtin.NMSKeep(0.8, 100)
op4 = builtin.NMSKeep(0.7, 200)
assert op1 == op2
assert op1 != op3
assert op1 != op4
assert op3 != op4
def test_argmxx_on_inf():
def run_argmax():
x = F.zeros((100, 100))
x[:] = -float("inf")
idxs = F.argmax(x, axis=0)
return idxs
def run_argmin():
x = F.zeros((100, 100))
x[:] = float("inf")
idxs = F.argmin(x, axis=0)
return idxs
assert all(run_argmax() >= 0)
assert all(run_argmin() >= 0)
def test_deformable_psroi_pooling():
inp = np.random.random((1, 256, 64, 64)).astype("float32")
rois = np.random.random((1, 5)).astype("float32")
trans = np.random.random((24, 2, 7, 7)).astype("float32")
pooled_h = 7
pooled_w = 7
sample_per_part = 4
no_trans = False
part_size = 7
spatial_scale = 1.0 / 64
trans_std = 0.1
y = F.deformable_psroi_pooling(
tensor(inp),
tensor(rois),
tensor(trans),
no_trans,
part_size,
pooled_h,
pooled_w,
sample_per_part,
spatial_scale,
trans_std,
)
def test_cvt_color():
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def bgr2gray(bgr):
return np.dot(bgr[..., :3], [0.114, 0.587, 0.299])
inp = np.random.randn(3, 3, 3, 3).astype(np.float32)
out = np.expand_dims(rgb2gray(inp), 3).astype(np.float32)
x = tensor(inp)
y = F.vision.cvt_color(x, mode="RGB2GRAY")
np.testing.assert_allclose(y.numpy(), out, atol=1e-5)
out1 = np.expand_dims(bgr2gray(inp), 3).astype(np.float32)
y1 = F.vision.cvt_color(x, mode="BGR2GRAY")
np.testing.assert_allclose(y1.numpy(), out1, atol=1e-5)
@pytest.mark.parametrize("val", [2, [2,], [2, 3]])
def test_ones(val):
shp = tensor(val)
np_shp = np.array(val)
np.testing.assert_equal(F.ones(shp), np.ones(np_shp))
def test_assert_equal():
shape = (2, 3, 4, 5)
x = F.ones(shape, dtype=np.float32)
y = F.zeros(shape, dtype=np.float32) + 1.00001
z = F.utils._assert_equal(x, y)
def test_assert_not_equal():
shape = (2, 3, 4, 5)
x = F.ones(shape, dtype=np.float32)
y = F.zeros(shape, dtype=np.float32) + 1.1
with pytest.raises(RuntimeError):
z = F.utils._assert_equal(x, y)
def test_neg_axis():
x = tensor(np.random.normal(0, 1, (32, 5)))
y = F.argmax(x, axis=-1)
yy = F.argmax(x, axis=1)
np.testing.assert_equal(y.numpy(), yy.numpy())
y = F.argmax(x, axis=(-1, -2))
yy = F.argmax(x, axis=(0, 1))
np.testing.assert_equal(y.numpy(), yy.numpy())
y = F.argmin(x, axis=(-1, -2))
yy = F.argmin(x, axis=(0, 1))
np.testing.assert_equal(y.numpy(), yy.numpy())
def test_sliding_window():
N, C, H, W = 2, 3, 7, 8
inp = np.random.normal(size=(N, C, H, W))
ph, pw = 1, 2
sh, sw = 2, 1
wh, ww = 3, 2
dh, dw = 1, 3
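    # output length for input size i, padding p, stride s, dilation d and window size w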
s = lambda i, p, s, d, w: (i + p * 2 - (w - 1) * d - 1) // s + 1
inp_pad = np.zeros((N, C, H + ph * 2, W + pw * 2))
inp_pad[:, :, ph : H + ph, pw : W + pw] = inp
gt_out = np.empty(
(N, C, s(H, ph, sh, dh, wh), s(W, pw, sw, dw, ww), wh, ww), dtype=np.float32
)
for n, c, oh, ow in itertools.product(*map(range, gt_out.shape[:4])):
ih, iw = oh * sh, ow * sw
gt_out[n, c, oh, ow, :] = inp_pad[
n, c, ih : ih + (wh - 1) * dh + 1 : dh, iw : iw + (ww - 1) * dw + 1 : dw
]
out = F.sliding_window(
tensor(inp), (wh, ww), padding=(ph, pw), stride=(sh, sw), dilation=(dh, dw)
)
np.testing.assert_equal(gt_out, out.numpy())
def test_sliding_window_transpose():
N, C, H, W = 2, 3, 7, 8
ph, pw = 1, 2
sh, sw = 2, 1
wh, ww = 3, 2
dh, dw = 1, 3
s = lambda i, p, s, d, w: (i + p * 2 - (w - 1) * d - 1) // s + 1
inp = np.random.normal(
size=(N, C, s(H, ph, sh, dh, wh), s(W, pw, sw, dw, ww), wh, ww)
).astype(np.float32)
gt_out = np.zeros((N, C, H, W), dtype=np.float32)
for n, c in itertools.product(*map(range, inp.shape[:2])):
oh = 0
for ih in range(-ph, H + ph - dh * (wh - 1), sh):
ow = 0
for iw in range(-pw, W + pw - dw * (ww - 1), sw):
for kh, kw in itertools.product(*map(range, inp.shape[-2:])):
ih2 = ih + dh * kh
iw2 = iw + dw * kw
if ih2 >= 0 and ih2 < H and iw2 >= 0 and iw2 < W:
gt_out[n, c, ih2, iw2] += inp[n, c, oh, ow, kh, kw]
ow += 1
oh += 1
out = F.sliding_window_transpose(
tensor(inp),
(H, W),
(wh, ww),
padding=(ph, pw),
stride=(sh, sw),
dilation=(dh, dw),
)
np.testing.assert_equal(gt_out, out.numpy())
def test_pad():
src = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
dst = np.pad(src, ((2, 2), (2, 2)), "constant")
res = F.nn.pad(tensor(src), ((2, 2), (2, 2)), "CONSTANT")
np.testing.assert_allclose(res, dst, atol=1e-5)
dst = np.pad(src, ((2, 2), (2, 2)), "constant", constant_values=3)
res = F.nn.pad(tensor(src), ((2, 2), (2, 2)), "CONSTANT", constant_value=3)
np.testing.assert_allclose(res, dst, atol=1e-5)
    dst = np.pad(src, ((2, 2), (2, 2)), "edge")
"""
Tests for the game class
"""
from nashpy.algorithms.support_enumeration import (potential_support_pairs,
indifference_strategies,
obey_support, is_ne,
solve_indifference,
powerset)
import unittest
import nashpy as nash
import numpy as np
from hypothesis import given
from hypothesis.extra.numpy import arrays
class TestSupportEnumeration(unittest.TestCase):
def test_potential_supports(self):
"""Test for the enumeration of potential supports"""
A = np.array([[1, 0], [-2, 3]])
B = np.array([[3, 2], [-1, 0]])
self.assertEqual(list(potential_support_pairs(A, B)),
[((0,), (0,)),
((0,), (1,)),
((0,), (0, 1)),
((1,), (0,)),
((1,), (1,)),
((1,), (0, 1)),
((0, 1), (0,)),
((0, 1), (1,)),
((0, 1), (0, 1))])
A = np.array([[1, 0, 2], [-2, 3, 9]])
B = np.array([[3, 2, 1], [-1, 0, 2]])
self.assertEqual(list(potential_support_pairs(A, B)),
[((0,), (0,)),
((0,), (1,)),
((0,), (2,)),
((0,), (0, 1)),
((0,), (0, 2)),
((0,), (1, 2)),
((0,), (0, 1, 2)),
((1,), (0,)),
((1,), (1,)),
((1,), (2,)),
((1,), (0, 1)),
((1,), (0, 2)),
((1,), (1, 2)),
((1,), (0, 1, 2)),
((0, 1), (0,)),
((0, 1), (1,)),
((0, 1), (2,)),
((0, 1), (0, 1)),
((0, 1), (0, 2)),
((0, 1), (1, 2)),
((0, 1), (0, 1, 2))])
A = np.array([[1, 0], [-2, 3], [2, 1]])
B = np.array([[3, 2], [-1, 0], [5, 2]])
self.assertEqual(list(potential_support_pairs(A, B)),
[((0,), (0,)),
((0,), (1,)),
((0,), (0, 1)),
((1,), (0,)),
((1,), (1,)),
((1,), (0, 1)),
((2,), (0,)),
((2,), (1,)),
((2,), (0, 1)),
((0, 1), (0,)),
((0, 1), (1,)),
((0, 1), (0, 1)),
((0, 2), (0,)),
((0, 2), (1,)),
((0, 2), (0, 1)),
((1, 2), (0,)),
((1, 2), (1,)),
((1, 2), (0, 1)),
((0, 1, 2), (0,)),
((0, 1, 2), (1,)),
((0, 1, 2), (0, 1))])
def test_potential_supports_with_non_degenerate_flag(self):
"""Test for the enumeration of potential supports when constrained to
non degenerate games"""
A = np.array([[1, 0], [-2, 3]])
B = np.array([[3, 2], [-1, 0]])
self.assertEqual(list(potential_support_pairs(A, B,
non_degenerate=True)),
[((0,), (0,)),
((0,), (1,)),
((1,), (0,)),
((1,), (1,)),
((0, 1), (0, 1))])
A = np.array([[1, 0, 2], [-2, 3, 9]])
B = np.array([[3, 2, 1], [-1, 0, 2]])
self.assertEqual(list(potential_support_pairs(A, B,
non_degenerate=True)),
[((0,), (0,)),
((0,), (1,)),
((0,), (2,)),
((1,), (0,)),
((1,), (1,)),
((1,), (2,)),
((0, 1), (0, 1)),
((0, 1), (0, 2)),
((0, 1), (1, 2))])
A = np.array([[1, 0], [-2, 3], [2, 1]])
B = np.array([[3, 2], [-1, 0], [5, 2]])
self.assertEqual(list(potential_support_pairs(A, B,
non_degenerate=True)),
[((0,), (0,)),
((0,), (1,)),
((1,), (0,)),
((1,), (1,)),
((2,), (0,)),
((2,), (1,)),
((0, 1), (0, 1)),
((0, 2), (0, 1)),
((1, 2), (0, 1))])
def test_indifference_strategies(self):
"""Test for the indifference strategies of potential supports"""
A = np.array([[2, 1], [0, 2]])
B = np.array([[2, 0], [1, 2]])
expected_indifference = [(np.array([1, 0]), np.array([1, 0])),
(np.array([1, 0]), np.array([0, 1])),
(np.array([0, 1]), np.array([1, 0])),
(np.array([0, 1]), np.array([0, 1])),
(np.array([1/3, 2/3]), np.array([1/3, 2/3]))]
obtained_indifference = [out[:2]
for out in indifference_strategies(A, B)]
self.assertEqual(len(obtained_indifference), len(expected_indifference))
for obtained, expected in zip(obtained_indifference,
expected_indifference):
self.assertTrue(np.array_equal(obtained, expected),
msg="obtained: {} !=expected: {}".format(obtained,
expected))
def test_indifference_strategies_with_non_degenerate(self):
"""Test for the indifference strategies of potential supports"""
A = np.array([[2, 1], [0, 2]])
B = np.array([[2, 0], [1, 2]])
expected_indifference = [(np.array([1, 0]), np.array([1, 0])),
(np.array([1, 0]), np.array([0, 1])),
(np.array([0, 1]), np.array([1, 0])),
(np.array([0, 1]), np.array([0, 1])),
(np.array([1/3, 2/3]), np.array([1/3, 2/3]))]
obtained_indifference = [out[:2]
for out in indifference_strategies(A, B,
non_degenerate=True)]
self.assertEqual(len(obtained_indifference), len(expected_indifference))
for obtained, expected in zip(obtained_indifference,
expected_indifference):
self.assertTrue(np.array_equal(obtained, expected),
msg="obtained: {} !=expected: {}".format(obtained,
expected))
def test_indifference_strategies_with_high_tolerance(self):
"""Test for the indifference strategies of potential supports"""
A = np.array([[2, 1], [0, 2]])
B = np.array([[2, 0], [1, 2]])
expected_indifference = [(np.array([1, 0]), np.array([1, 0])),
(np.array([1, 0]), np.array([0, 1])),
(np.array([0, 1]), np.array([1, 0])),
(np.array([0, 1]), np.array([0, 1])),
(np.array([1/3, 2/3]), np.array([1/3, 2/3]))]
obtained_indifference = [out[:2]
for out in indifference_strategies(A, B,
tol=10 ** -2)]
self.assertEqual(len(obtained_indifference), len(expected_indifference))
for obtained, expected in zip(obtained_indifference,
expected_indifference):
self.assertTrue(np.array_equal(obtained, expected),
msg="obtained: {} !=expected: {}".format(obtained,
expected))
def test_obey_support(self):
"""Test for obey support"""
A = np.array([[2, 1], [0, 2]])
B = np.array([[2, 0], [1, 2]])
self.assertFalse(obey_support(False, np.array([0, 1])))
self.assertFalse(obey_support(np.array([1, 0]), np.array([0, 1])))
        self.assertFalse(obey_support(np.array([0, .5]), np.array([0, 1])))
from __future__ import annotations
from typing import List
import numpy as np
from numpy.typing import NDArray
from operation import MagneticOperation, remainder1_symmetry_operation
# ref: ITA Table1.5.1.1
transformation_matrix_monoclinic = {
"a_to_b": np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]), # (a, b, c) -> (c, a, b)
"-a_to_b": np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]]), # (a, b, c) -> (b, -a, c)
"-b_to_b": np.array([[0, 0, 1], [0, -1, 0], [1, 0, 0]]), # (a, b, c) -> (c, -b, a)
"c_to_b": np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]]), # (a, b, c) -> (b, c, a)
"-c_to_b": np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]]), # (a, b, c) -> (a, -c, b)
"b2_to_b1": np.array([[0, 0, -1], [0, 1, 0], [1, 0, -1]]), # P -> P, A -> C
"b3_to_b1": np.array([[-1, 0, 1], [0, 1, 0], [-1, 0, 0]]), # P -> P, I -> C
}
transformation_matrix_orthorhombic = {
"abc": np.eye(3),
"ba-c": | np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1]]) | numpy.array |
# <NAME>, <NAME> finished
#
#
# 2019-11-16
# -----------------------------------------------------------------------------
# This function calculates the CRPS between an ensemble and one observation.
#
# input:
# calculation: mxn matrix; m = number of simulations
#              n = number of members in ensemble
# observation: mx1 vector; m = number of records
# case: 'emp': empirical cumulative distribution
# 'normal_exact': normal cumulative distribution
# 'gamma_exact': gamma cumulative distribution
#
# output:
#   CRPS: mx1 vector; each record is one entry
# -----------------------------------------------------------------------------
import numpy as np
from scipy.stats import norm, gamma
def CRPS(calculation, observation, case):
# transform input into numpy array
calculation = np.array(calculation, dtype='float64')
observation = np.array(observation, dtype='float64')
dim1 = calculation.shape
if len(dim1) == 1:
calculation = calculation.reshape((1,dim1[0]))
dim2 = observation.shape
if len(dim2) == 0:
observation = observation.reshape((1,1))
elif len(dim2) == 1:
observation = observation.reshape((dim2[0],1))
    # initialisation
m = np.size(calculation, axis=0)
CRPS = np.empty((m, 1))
CRPS.fill(np.nan)
# non-parametric estimation based on the empirical cumulative distribution of the ensemble. According to <NAME>'s idea
if (case == "emp"):
for i in range(m):
if (np.any(np.isnan(calculation[i,:])) == 0 and np.isnan(observation[i]) == 0):
ssample = np.sort(calculation[i,:])
step_size = 1/(len(calculation[i,:]))
                # calculation of the area below the observation
area1 = 0
sub_sample1 = ssample[ssample <= observation[i]]
sub_sample1 = np.append(sub_sample1, observation[i])
for j in range(1,len(sub_sample1)):
area1 += (j*step_size)**2 * (sub_sample1[j] - sub_sample1[j-1])
                # calculation of the area above the observation
area2 = 0
sub_sample2 = ssample[ssample > observation[i]]
sub_sample2 = np.insert(sub_sample2, 0, observation[i])
n2 = len(sub_sample2)
for j in range(1,n2):
area2 += ((n2-j)*step_size)**2 * (sub_sample2[j] - sub_sample2[j-1])
CRPS[i] = area1 + area2
else:
CRPS[i] = np.nan
# -------------------------------------------------------------------------
# estimation based on the normal cumulative distribution of the ensemble
elif (case == "normal_exact"):
for i in range(m):
if (np.any(np.isnan(calculation[i,:])) == 0 and np.isnan(observation[i]) == 0):
# preparation
mu, sigma = norm.fit(calculation[i,:])
# transform standard deviation to unbiased estimation of standard deviation
nb_mb = len(calculation[i,:])
sighat = nb_mb/(nb_mb-1) * sigma
vcr = (observation[i] - mu) / sighat
phi = norm.pdf(vcr, loc=0, scale=1)
PHI = norm.cdf(vcr, loc=0, scale=1)
# calculation of the CRPS according to Gneiting and Raftery 2007
CRPS[i] = abs(sighat * ((1/np.sqrt(np.pi)) - 2*phi - (vcr*(2*PHI-1))))
else:
CRPS[i] = np.nan
# -------------------------------------------------------------------------
# estimation based on the gamma cumulative distribution of the ensemble
elif (case == "gamma_exact"):
for i in range(m):
if (np.any(np.isnan(calculation[i,:])) == 0 and np.isnan(observation[i]) == 0):
# preparation; exchange negative values in the data
sample = calculation[i,:]
idxs, = np.where(sample <= 0)
for idx in idxs:
sample[idx] = 0.0001
# fit data to gamma distribtion
alpha, loc, beta = gamma.fit(sample, floc=0)
# generate cumulative gamma distribution
data1 = gamma.rvs(alpha, loc=0, scale=beta, size=1000)
data2 = gamma.rvs(alpha, loc=0, scale=beta, size=1000)
                # sample-based CRPS estimate: E|X - y| - 0.5*E|X - X'|
                CRPS[i] = np.mean(np.absolute(data1 - observation[i])) \
                          - 0.5*np.mean(np.absolute(data1 - data2))
            else:
                CRPS[i] = np.nan
    return CRPS
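if __name__ == "__main__":
    # minimal usage sketch with made-up values: CRPS of a 5-member ensemble
    # against two observations, using the empirical ('emp') case
    ens = np.array([[1.0, 1.2, 0.8, 1.1, 0.9],
                    [2.0, 2.5, 1.5, 2.2, 1.8]])
    obs = np.array([1.05, 2.4])
    print(CRPS(ens, obs, "emp"))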
# Copyright(c) <NAME> 2018
"""
This file contains tools to reorder the strokes that come out of the Iterative Stroke Sampling simulation.
We optimize for swapping color and brush as rarely as possible, by reordering strokes that wouldn't overpaint each other.
"""
import copy
import os
import numpy as np
from src import paths
from src.config import BrushConfig
from src.rapid.RapidFileWriter import RapidFileWriter
# ________________________________________________ SINGLE COMMANDS _____________________________________________________
class Command(object):
def get_rapid(self):
raise NotImplementedError
def is_get_color(self):
return isinstance(self, GetColor)
def is_clean(self):
return isinstance(self, Clean)
def is_change_brush(self):
return isinstance(self, ChangeBrush)
def is_apply_stroke(self):
return isinstance(self, ApplyStroke)
class GetColor(Command):
def __init__(self, color):
self.color = color
def get_rapid(self):
return "get" + self.color.capital
class Clean(Command):
def __init__(self):
pass
def get_rapid(self):
return "Clean"
class ChangeBrush(Command):
def __init__(self, from_brush, to_brush):
self.from_brush = from_brush
self.to_brush = to_brush
def get_rapid(self):
return "ChBr_F_" + self.from_brush.name + "_TO_" + self.to_brush.name
class ApplyStroke(Command):
def __init__(self, brush, color, stroke_id, rotation_id,
center_x_mm, center_y_mm, painting_size_x_mm, painting_size_y_mm, canvas_center_xyz_mm):
self.brush = brush
self.color = color # just stored for info in order optimizer
self.stroke_id = stroke_id
self.rotation_id = rotation_id
self.x = canvas_center_xyz_mm[0] + center_x_mm - (painting_size_x_mm / 2.0)
self.y = canvas_center_xyz_mm[1] + center_y_mm - (painting_size_y_mm / 2.0)
self.z = canvas_center_xyz_mm[2]
def get_rapid(self):
func_name = self.brush.stroke_names_list[self.stroke_id] + "_" + str(self.rotation_id + 1)
return func_name + " " + str(self.x) + ", " + str(self.y) + ", " + str(self.z)
# ______________________________________________ SEQUENCE OF COMMANDS __________________________________________________
class CommandSequence(object):
def __init__(self, args):
self.args = args
self.name = args.name
self.commands = None
self.reset()
def append(self, x):
self.commands.append(x)
def optimize_order(self):
print("Cleaning command sequence")
self.commands = _clean_list_of_commands(list_of_commands=self.commands)
print("Optimizing command order for least amount of color & brush swaps.")
self.commands = StrokeOrderOptimizer.run(list_of_commands=self.commands, args=self.args)
# Do again after order optimization
self.commands = _clean_list_of_commands(list_of_commands=self.commands)
def write_rapid_code_to_file(self):
with open(os.path.join(paths.paintings_path, self.name + ".txt"), 'w') as rapid_file:
rapid_file_writer = RapidFileWriter(rapid_file, args=self.args)
rapid_file_writer.begin_module()
rapid_file_writer.import_robtargets_from_all_rapid_fns()
# __________________________________________________ main __________________________________________________
rapid_file.write("\n\n\t" + "PROC main()\n")
rapid_file_writer.call_activate_feedback_pin()
# just to track for print statement
num_cleaned = 0
num_changed_brush = 0
current_brush = None
for cmd in self.commands:
if cmd.is_clean():
num_cleaned += 1
rapid_file_writer.call_pump_clean(brush=current_brush)
rapid_file_writer.call_towel(brush=current_brush)
rapid_file_writer.call_pump_clean(brush=current_brush)
rapid_file_writer.call_towel(brush=current_brush)
elif cmd.is_get_color():
rapid_file_writer.call_get_color(brush=current_brush, color=cmd.color)
elif cmd.is_change_brush():
num_changed_brush += 1
current_brush = cmd.to_brush
rapid_file_writer.call_brush_swap(from_brush=cmd.from_brush, to_brush=cmd.to_brush)
elif cmd.is_apply_stroke():
rapid_file.write("\t\t" + cmd.get_rapid() + ";" + "\n")
else:
raise ValueError(cmd)
assert current_brush.name == BrushConfig.NOTHING_MOUNTED.name
rapid_file_writer.end_proc()
rapid_file_writer.import_all_function_definitions()
rapid_file_writer.end_module()
print("Changed brush ", num_changed_brush, " times.")
print("Cleaned ", num_cleaned, " times.")
def reset(self):
self.commands = []
# ____________________________________________ TOOLS FOR SEQ OF COMMANDS _______________________________________________
def _clean_list_of_commands(list_of_commands):
""" scans for and deletes redundant stuff in code like get_color -> Clean """
_list_of_commands = copy.copy(list_of_commands)
found_smth = True
while found_smth:
i = 0
curr_len = len(_list_of_commands)
found_smth = False
while i < curr_len - 1:
            # get_color -> Clean
            if _list_of_commands[i].is_get_color() and _list_of_commands[i + 1].is_clean():
                del _list_of_commands[i]  # delete get_color
                found_smth = True
            # get_color -> Change_Brush
            elif _list_of_commands[i].is_get_color() and _list_of_commands[i + 1].is_change_brush():
                del _list_of_commands[i]  # delete get_color
                found_smth = True
            # Change_Brush -> Clean
            elif _list_of_commands[i].is_change_brush() and _list_of_commands[i + 1].is_clean():
                del _list_of_commands[i + 1]  # delete Clean
                found_smth = True
            # Change_Brush -> Change_Brush
            elif _list_of_commands[i].is_change_brush() and _list_of_commands[i + 1].is_change_brush():
                del _list_of_commands[i]  # delete first Change_Brush
                found_smth = True
            # Change_Brush to same brush
            elif (_list_of_commands[i].is_change_brush()
                  and _list_of_commands[i].from_brush is _list_of_commands[i].to_brush):
                del _list_of_commands[i]  # delete Change_Brush
                found_smth = True
            # get_color -> get_color
            elif _list_of_commands[i].is_get_color() and _list_of_commands[i + 1].is_get_color():
                del _list_of_commands[i]  # delete first get_color
                found_smth = True
            # Clean -> Clean
            elif _list_of_commands[i].is_clean() and _list_of_commands[i + 1].is_clean():
                del _list_of_commands[i]  # delete first Clean
                found_smth = True
i += 1
curr_len = len(_list_of_commands)
return _list_of_commands
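def _example_clean_list_of_commands():
    """Minimal sketch: the cleaner only inspects command types, so a tiny stand-in
    color object is enough here. A GetColor immediately followed by Clean is
    redundant and gets dropped."""
    class _StubColor(object):
        capital = "Red"  # only needed if get_rapid() were called
    cmds = [GetColor(_StubColor()), Clean(), GetColor(_StubColor())]
    cleaned = _clean_list_of_commands(cmds)
    # cleaned now holds [Clean, GetColor]
    return cleaned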
class _OptimizerStrokeBlock:
def __init__(self, color, brush):
self.strokes_list = []
self.brush = brush
self.color = color
class StrokeOrderOptimizer:
@staticmethod
def run(list_of_commands, args):
_list_of_commands = copy.copy(list_of_commands)
list_of_strokes = StrokeOrderOptimizer._build_strokes_list(list_of_commands=_list_of_commands)
stroke_blocks_list = StrokeOrderOptimizer._build_stroke_blocks(list_of_strokes=list_of_strokes)
# optimize order
did_something = True
while did_something:
did_something = StrokeOrderOptimizer._optimize(stroke_blocks_list=stroke_blocks_list,
stroke_size=args.stroke_size_mm)
# return list of cmds like we got it, but with optimized order
return StrokeOrderOptimizer._list_of_blocks_to_list_of_commands(stroke_blocks_list)
@staticmethod
def _list_of_blocks_to_list_of_commands(stroke_blocks_list,):
commands = []
last_brush = BrushConfig.BRUSH_AT_BOOT
for block in stroke_blocks_list:
# swap brush if not equal to old one
if last_brush is not block.brush:
if last_brush is not BrushConfig.NOTHING_MOUNTED:
commands.append(Clean())
commands.append(ChangeBrush(from_brush=last_brush, to_brush=block.brush))
last_brush = block.brush
for stroke in block.strokes_list:
# after every stroke get color
commands.append(GetColor(color=stroke.color))
commands.append(stroke)
# Clean brush at end of every block
commands.append(Clean())
commands.append(ChangeBrush(from_brush=last_brush, to_brush=BrushConfig.NOTHING_MOUNTED))
return commands
@staticmethod
def _overlaps(block_1, block_2, stroke_size):
# loop over all strokes in ith block
for stroke_idx_loop in range(len(block_1.strokes_list)):
# loop over all strokes in 2nd merge_block
for stroke_idx_2nd in range(len(block_2.strokes_list)):
                # check if strokes have spatial overlap
                x_distance = np.abs(block_2.strokes_list[stroke_idx_2nd].x - block_1.strokes_list[stroke_idx_loop].x)
                y_distance = np.abs(block_2.strokes_list[stroke_idx_2nd].y - block_1.strokes_list[stroke_idx_loop].y)
                eucl = np.sqrt(x_distance ** 2 + y_distance ** 2)
# -*- coding: utf-8 -*-
from EXOSIMS.Completeness.BrownCompleteness import BrownCompleteness
import numpy as np
import os
import hashlib
import scipy.optimize as optimize
import scipy.interpolate as interpolate
import scipy.integrate as integrate
import astropy.units as u
try:
import cPickle as pickle
except ImportError:
import pickle
from EXOSIMS.util.memoize import memoize
import sys
# Python 3 compatibility:
if sys.version_info[0] > 2:
xrange = range
class GarrettCompleteness(BrownCompleteness):
"""Analytical Completeness class
This class contains all variables and methods necessary to perform
Completeness Module calculations based on Garrett and Savransky 2016
in exoplanet mission simulation.
The completeness calculations performed by this method assume that all
planetary parameters are independently distributed. The probability density
functions used here are either independent or marginalized from a joint
probability density function.
Args:
\*\*specs:
user specified values
Attributes:
updates (nx5 ndarray):
Completeness values of successive observations of each star in the
target list (initialized in gen_update)
"""
def __init__(self, **specs):
# bring in inherited Completeness prototype __init__ values
BrownCompleteness.__init__(self, **specs)
# get unitless values of population parameters
self.amin = float(self.PlanetPopulation.arange.min().value)
self.amax = float(self.PlanetPopulation.arange.max().value)
self.emin = float(self.PlanetPopulation.erange.min())
self.emax = float(self.PlanetPopulation.erange.max())
self.pmin = float(self.PlanetPopulation.prange.min())
self.pmax = float(self.PlanetPopulation.prange.max())
self.Rmin = float(self.PlanetPopulation.Rprange.min().to('earthRad').value)
self.Rmax = float(self.PlanetPopulation.Rprange.max().to('earthRad').value)
if self.PlanetPopulation.constrainOrbits:
self.rmin = self.amin
self.rmax = self.amax
else:
self.rmin = self.amin*(1.0 - self.emax)
self.rmax = self.amax*(1.0 + self.emax)
self.zmin = self.pmin*self.Rmin**2
self.zmax = self.pmax*self.Rmax**2
# conversion factor
self.x = float(u.earthRad.to('AU'))
# distributions needed
self.dist_sma = self.PlanetPopulation.dist_sma
self.dist_eccen = self.PlanetPopulation.dist_eccen
self.dist_eccen_con = self.PlanetPopulation.dist_eccen_from_sma
self.dist_albedo = self.PlanetPopulation.dist_albedo
self.dist_radius = self.PlanetPopulation.dist_radius
# are any of a, e, p, Rp constant?
self.aconst = self.amin == self.amax
self.econst = self.emin == self.emax
self.pconst = self.pmin == self.pmax
self.Rconst = self.Rmin == self.Rmax
# degenerate case where aconst, econst and e = 0
assert not (all([self.aconst,self.econst,self.pconst,self.Rconst]) and self.emax == 0), \
"At least one parameter (out of semi-major axis, albedo, and radius) must vary when eccentricity is constant and zero."
# solve for bstar
beta = np.linspace(0.0,np.pi,1000)*u.rad
Phis = self.PlanetPhysicalModel.calc_Phi(beta)
# Interpolant for phase function which removes astropy Quantity
self.Phi = interpolate.InterpolatedUnivariateSpline(beta.value,Phis,k=3,ext=1)
self.Phiinv = interpolate.InterpolatedUnivariateSpline(Phis[::-1],beta.value[::-1],k=3,ext=1)
# get numerical derivative of phase function
dPhis = np.zeros(beta.shape)
db = beta[1].value - beta[0].value
dPhis[0:1] = (-25.0*Phis[0:1]+48.0*Phis[1:2]-36.0*Phis[2:3]+16.0*Phis[3:4]-3.0*Phis[4:5])/(12.0*db)
dPhis[-2:-1] = (25.0*Phis[-2:-1]-48.0*Phis[-3:-2]+36.0*Phis[-4:-3]-16.0*Phis[-5:-4]+3.0*Phis[-6:-5])/(12.0*db)
dPhis[2:-2] = (Phis[0:-4]-8.0*Phis[1:-3]+8.0*Phis[3:-1]-Phis[4:])/(12.0*db)
self.dPhi = interpolate.InterpolatedUnivariateSpline(beta.value,dPhis,k=3,ext=1)
# solve for bstar
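        # bstar is the root of d/db[sin(b)**2*Phi(b)], i.e. the phase angle where the brightness factor sin(b)**2*Phi(b) peaks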
f = lambda b: 2.0*np.sin(b)*np.cos(b)*self.Phi(b) + np.sin(b)**2*self.dPhi(b)
self.bstar = float(optimize.root(f,np.pi/3.0).x)
# helpful constants
self.cdmin1 = -2.5*np.log10(self.pmax*(self.Rmax*self.x/self.rmin)**2)
self.cdmin2 = -2.5*np.log10(self.pmax*(self.Rmax*self.x*np.sin(self.bstar))**2*self.Phi(self.bstar))
self.cdmin3 = -2.5*np.log10(self.pmax*(self.Rmax*self.x/self.rmax)**2)
self.cdmax = -2.5*np.log10(self.pmin*(self.Rmin*self.x/self.rmax)**2)
self.val = np.sin(self.bstar)**2*self.Phi(self.bstar)
self.d1 = -2.5*np.log10(self.pmax*(self.Rmax*self.x/self.rmin)**2)
self.d2 = -2.5*np.log10(self.pmax*(self.Rmax*self.x/self.rmin)**2*self.Phi(self.bstar))
self.d3 = -2.5*np.log10(self.pmax*(self.Rmax*self.x/self.rmax)**2*self.Phi(self.bstar))
self.d4 = -2.5*np.log10(self.pmax*(self.Rmax*self.x/self.rmax)**2*self.Phi(np.pi/2.0))
self.d5 = -2.5*np.log10(self.pmin*(self.Rmin*self.x/self.rmax)**2*self.Phi(np.pi/2.0))
# vectorize scalar methods
self.rgrand2v = np.vectorize(self.rgrand2, otypes=[np.float64])
self.f_dmagsv = np.vectorize(self.f_dmags, otypes=[np.float64])
self.f_sdmagv = np.vectorize(self.f_sdmag, otypes=[np.float64])
self.f_dmagv = np.vectorize(self.f_dmag, otypes=[np.float64])
self.f_sv = np.vectorize(self.f_s, otypes=[np.float64])
self.mindmagv = np.vectorize(self.mindmag, otypes=[np.float64])
self.maxdmagv = np.vectorize(self.maxdmag, otypes=[np.float64])
# inverse functions for phase angle
b1 = np.linspace(0.0, self.bstar, 20000)
# b < bstar
self.binv1 = interpolate.InterpolatedUnivariateSpline(np.sin(b1)**2*self.Phi(b1), b1, k=1, ext=1)
b2 = np.linspace(self.bstar, np.pi, 20000)
b2val = np.sin(b2)**2*self.Phi(b2)
# b > bstar
self.binv2 = interpolate.InterpolatedUnivariateSpline(b2val[::-1], b2[::-1], k=1, ext=1)
if self.rmin != self.rmax:
# get pdf of r
self.vprint('Generating pdf of orbital radius')
r = np.linspace(self.rmin, self.rmax, 1000)
fr = np.zeros(r.shape)
for i in xrange(len(r)):
fr[i] = self.f_r(r[i])
self.dist_r = interpolate.InterpolatedUnivariateSpline(r, fr, k=3, ext=1)
self.vprint('Finished pdf of orbital radius')
if not all([self.pconst,self.Rconst]):
# get pdf of p*R**2
self.vprint('Generating pdf of albedo times planetary radius squared')
z = np.linspace(self.zmin, self.zmax, 1000)
fz = np.zeros(z.shape)
for i in xrange(len(z)):
fz[i] = self.f_z(z[i])
self.dist_z = interpolate.InterpolatedUnivariateSpline(z, fz, k=3, ext=1)
self.vprint('Finished pdf of albedo times planetary radius squared')
def target_completeness(self, TL):
"""Generates completeness values for target stars
This method is called from TargetList __init__ method.
Args:
TL (TargetList module):
TargetList class object
Returns:
comp0 (ndarray):
1D numpy array of completeness values for each target star
"""
OS = TL.OpticalSystem
# limiting planet delta magnitude for completeness
dMagMax = self.dMagLim
# important PlanetPopulation attributes
atts = list(self.PlanetPopulation.__dict__)
extstr = ''
for att in sorted(atts, key=str.lower):
if not callable(getattr(self.PlanetPopulation, att)) and att != 'PlanetPhysicalModel':
extstr += '%s: ' % att + str(getattr(self.PlanetPopulation, att)) + ' '
# include dMagMax
extstr += '%s: ' % 'dMagMax' + str(dMagMax) + ' '
ext = hashlib.md5(extstr.encode('utf-8')).hexdigest()
self.filename += ext
Cpath = os.path.join(self.cachedir, self.filename+'.acomp')
dist_s = self.genComp(Cpath, TL)
dist_sv = np.vectorize(dist_s.integral, otypes=[np.float64])
# calculate separations based on IWA
mode = list(filter(lambda mode: mode['detectionMode'] == True, OS.observingModes))[0]
IWA = mode['IWA']
OWA = mode['OWA']
smin = (np.tan(IWA)*TL.dist).to('AU').value
if np.isinf(OWA):
smax = np.array([self.rmax]*len(smin))
else:
smax = (np.tan(OWA)*TL.dist).to('AU').value
smax[smax>self.rmax] = self.rmax
comp0 = np.zeros(smin.shape)
# calculate dMags based on maximum dMag
if self.PlanetPopulation.scaleOrbits:
L = np.where(TL.L>0, TL.L, 1e-10) #take care of zero/negative values
smin = smin/np.sqrt(L)
smax = smax/np.sqrt(L)
dMagMax -= 2.5*np.log10(L)
mask = smin<self.rmax
comp0[mask] = self.comp_s(smin[mask], smax[mask], dMagMax[mask])
else:
mask = smin<self.rmax
comp0[mask] = dist_sv(smin[mask], smax[mask])
# ensure that completeness values are between 0 and 1
comp0 = np.clip(comp0, 0., 1.)
return comp0
def genComp(self, Cpath, TL):
"""Generates function to get completeness values
Args:
Cpath (str):
Path to pickled dictionary containing interpolant function
TL (TargetList module):
TargetList class object
Returns:
dist_s (callable(s)):
Marginalized to dMagMax probability density function for
projected separation
"""
# limiting planet delta magnitude for completeness
dMagMax = self.dMagLim
if os.path.exists(Cpath):
# dist_s interpolant already exists for parameters
self.vprint('Loading cached completeness file from %s' % Cpath)
try:
with open(Cpath, "rb") as ff:
H = pickle.load(ff)
except UnicodeDecodeError:
with open(Cpath, "rb") as ff:
H = pickle.load(ff,encoding='latin1')
self.vprint('Completeness loaded from cache.')
dist_s = H['dist_s']
else:
# generate dist_s interpolant and pickle it
self.vprint('Cached completeness file not found at "%s".' % Cpath)
self.vprint('Generating completeness.')
self.vprint('Marginalizing joint pdf of separation and dMag up to dMagMax')
# get pdf of s up to dMagMax
s = np.linspace(0.0,self.rmax,1000)
fs = np.zeros(s.shape)
for i in xrange(len(s)):
fs[i] = self.f_s(s[i], dMagMax)
dist_s = interpolate.InterpolatedUnivariateSpline(s, fs, k=3, ext=1)
self.vprint('Finished marginalization')
H = {'dist_s': dist_s}
with open(Cpath, 'wb') as ff:
pickle.dump(H, ff)
self.vprint('Completeness data stored in %s' % Cpath)
return dist_s
def comp_s(self, smin, smax, dMag):
"""Calculates completeness by first integrating over dMag and then
projected separation.
Args:
smin (ndarray):
Values of minimum projected separation (AU) from instrument
smax (ndarray):
Value of maximum projected separation (AU) from instrument
dMag (ndarray):
Planet delta magnitude
Returns:
comp (ndarray):
Completeness values
"""
# cast to arrays
smin = np.array(smin, ndmin=1, copy=False)
smax = np.array(smax, ndmin=1, copy=False)
dMag = np.array(dMag, ndmin=1, copy=False)
comp = np.zeros(smin.shape)
for i in xrange(len(smin)):
comp[i] = integrate.fixed_quad(self.f_sv, smin[i], smax[i], args=(dMag[i],), n=50)[0]
# ensure completeness values are between 0 and 1
comp = np.clip(comp, 0., 1.)
return comp
@memoize
def f_s(self, s, dMagMax):
"""Calculates probability density of projected separation marginalized
up to dMagMax
Args:
s (float):
Value of projected separation
dMagMax (float):
Maximum planet delta magnitude
Returns:
f (float):
Probability density
"""
if (s == 0.0) or (s == self.rmax):
f = 0.0
else:
d1 = self.mindmag(s)
d2 = self.maxdmag(s)
if d2 > dMagMax:
d2 = dMagMax
if d1 > d2:
f = 0.0
else:
f = integrate.fixed_quad(self.f_dmagsv, d1, d2, args=(s,), n=50)[0]
return f
@memoize
def f_dmags(self, dmag, s):
"""Calculates the joint probability density of dMag and projected
separation
Args:
dmag (float):
Planet delta magnitude
s (float):
Value of projected separation (AU)
Returns:
f (float):
Value of joint probability density
"""
if (dmag < self.mindmag(s)) or (dmag > self.maxdmag(s)) or (s == 0.0):
f = 0.0
else:
if self.rmin == self.rmax:
b1 = np.arcsin(s/self.amax)
b2 = np.pi-b1
z1 = 10.0**(-0.4*dmag)*(self.amax/self.x)**2/self.Phi(b1)
z2 = 10.0**(-0.4*dmag)*(self.amax/self.x)**2/self.Phi(b2)
f = 0.0
if (z1 > self.zmin) and (z1 < self.zmax):
f += np.sin(b1)/2.0*self.dist_z(z1)*z1*np.log(10.0)/(2.5*self.amax*np.cos(b1))
if (z2 > self.zmin) and (z2 < self.zmax):
f += np.sin(b2)/2.0*self.dist_z(z2)*z2*np.log(10.0)/(-2.5*self.amax*np.cos(b2))
else:
ztest = (s/self.x)**2*10.**(-0.4*dmag)/self.val
if self.PlanetPopulation.pfromRp:
f = 0.0
minR = self.PlanetPopulation.Rbs[:-1]
maxR = self.PlanetPopulation.Rbs[1:]
for i in xrange(len(minR)):
ptest = self.PlanetPopulation.get_p_from_Rp(minR[i]*u.earthRad)
Rtest = np.sqrt(ztest/ptest)
if Rtest > minR[i]:
if Rtest > self.Rmin:
Rl = Rtest
else:
Rl = self.Rmin
else:
if self.Rmin > minR[i]:
Rl = self.Rmin
else:
Rl = minR[i]
if self.Rmax > maxR[i]:
Ru = maxR[i]
else:
Ru = self.Rmax
if Rl < Ru:
f += integrate.fixed_quad(self.f_dmagsRp, Rl, Ru, args=(dmag,s), n=200)[0]
elif ztest >= self.zmax:
f = 0.0
elif (self.pconst & self.Rconst):
f = self.f_dmagsz(self.zmin,dmag,s)
else:
if ztest < self.zmin:
f = integrate.fixed_quad(self.f_dmagsz, self.zmin, self.zmax, args=(dmag, s), n=200)[0]
else:
f = integrate.fixed_quad(self.f_dmagsz, ztest, self.zmax, args=(dmag, s), n=200)[0]
return f
def f_dmagsz(self, z, dmag, s):
"""Calculates the joint probability density of albedo times planetary
radius squared, dMag, and projected separation
Args:
z (ndarray):
Values of albedo times planetary radius squared
dmag (float):
Planet delta magnitude
s (float):
Value of projected separation
Returns:
f (ndarray):
Values of joint probability density
"""
if not isinstance(z,np.ndarray):
z = np.array(z, ndmin=1, copy=False)
vals = (s/self.x)**2*10.**(-0.4*dmag)/z
f = np.zeros(z.shape)
fa = f[vals<self.val]
za = z[vals<self.val]
valsa = vals[vals<self.val]
b1 = self.binv1(valsa)
b2 = self.binv2(valsa)
r1 = s/np.sin(b1)
r2 = s/np.sin(b2)
good1 = ((r1>self.rmin)&(r1<self.rmax))
good2 = ((r2>self.rmin)&(r2<self.rmax))
if (self.pconst & self.Rconst):
fa[good1] = np.sin(b1[good1])/2.0*self.dist_r(r1[good1])/np.abs(self.Jac(b1[good1]))
fa[good2] += np.sin(b2[good2])/2.0*self.dist_r(r2[good2])/np.abs(self.Jac(b2[good2]))
else:
fa[good1] = self.dist_z(za[good1])*np.sin(b1[good1])/2.0*self.dist_r(r1[good1])/np.abs(self.Jac(b1[good1]))
fa[good2] += self.dist_z(za[good2])*np.sin(b2[good2])/2.0*self.dist_r(r2[good2])/np.abs(self.Jac(b2[good2]))
f[vals<self.val] = fa
return f
def f_dmagsRp(self, Rp, dmag, s):
"""Calculates the joint probability density of planetary radius,
dMag, and projected separation
Args:
Rp (ndarray):
Values of planetary radius
dmag (float):
Planet delta magnitude
s (float):
Value of projected separation
Returns:
f (ndarray):
Values of joint probability density
"""
if not isinstance(Rp,np.ndarray):
Rp = np.array(Rp, ndmin=1, copy=False)
vals = (s/self.x)**2*10.**(-0.4*dmag)/self.PlanetPopulation.get_p_from_Rp(Rp*u.earthRad)/Rp**2
f = np.zeros(Rp.shape)
fa = f[vals<self.val]
Rpa = Rp[vals<self.val]
valsa = vals[vals<self.val]
b1 = self.binv1(valsa)
b2 = self.binv2(valsa)
r1 = s/np.sin(b1)
r2 = s/np.sin(b2)
good1 = ((r1>self.rmin)&(r1<self.rmax))
good2 = ((r2>self.rmin)&(r2<self.rmax))
if (self.pconst & self.Rconst):
fa[good1] = np.sin(b1[good1])/2.0*self.dist_r(r1[good1])/np.abs(self.Jac(b1[good1]))
fa[good2] += np.sin(b2[good2])/2.0*self.dist_r(r2[good2])/np.abs(self.Jac(b2[good2]))
else:
fa[good1] = self.dist_radius(Rpa[good1])*np.sin(b1[good1])/2.0*self.dist_r(r1[good1])/np.abs(self.Jac(b1[good1]))
fa[good2] += self.dist_radius(Rpa[good2])*np.sin(b2[good2])/2.0*self.dist_r(r2[good2])/np.abs(self.Jac(b2[good2]))
f[vals<self.val] = fa
return f
def mindmag(self, s):
"""Calculates the minimum value of dMag for projected separation
Args:
s (float):
Projected separations (AU)
Returns:
mindmag (float):
Minimum planet delta magnitude
"""
if s == 0.0:
mindmag = self.cdmin1
elif s < self.rmin*np.sin(self.bstar):
mindmag = self.cdmin1-2.5*np.log10(self.Phi(np.arcsin(s/self.rmin)))
elif s < self.rmax*np.sin(self.bstar):
mindmag = self.cdmin2+5.0*np.log10(s)
elif s <= self.rmax:
mindmag = self.cdmin3-2.5*np.log10(self.Phi(np.arcsin(s/self.rmax)))
else:
mindmag = np.inf
return mindmag
def maxdmag(self, s):
"""Calculates the maximum value of dMag for projected separation
Args:
s (float):
Projected separation (AU)
Returns:
maxdmag (float):
Maximum planet delta magnitude
"""
if s == 0.0:
maxdmag = self.cdmax - 2.5*np.log10(self.Phi(np.pi))
elif s < self.rmax:
maxdmag = self.cdmax - 2.5*np.log10(np.abs(self.Phi(np.pi-np.arcsin(s/self.rmax))))
else:
maxdmag = self.cdmax - 2.5*np.log10(self.Phi(np.pi/2.0))
return maxdmag
def Jac(self, b):
"""Calculates determinant of the Jacobian transformation matrix to get
the joint probability density of dMag and s
Args:
b (ndarray):
Phase angles
Returns:
f (ndarray):
Determinant of Jacobian transformation matrix
"""
        f = -2.5/(self.Phi(b)*np.log(10.0))*self.dPhi(b)*np.sin(b) - 5./np.log(10.0)*np.cos(b)
        return f
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 28 20:30:31 2021
@author: ASUS
"""
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score
from torchvision import transforms
import torch
import numpy as np
import logging
LOG_FORMAT = "%(asctime)s - %(message)s"
logging.basicConfig(filename= "./pretrained_models/result.log",
level = logging.INFO,
format=LOG_FORMAT)
def write_log(message):
logger = logging.getLogger()
logger.info(message)
def save_model(args, model, name=''):
name = name if len(name) > 0 else 'default_model'
torch.save(model, 'pretrained_models/{}.pt'.format(name))
def load_model(args, name=''):
name = name if len(name) > 0 else 'default_model'
model = torch.load('pretrained_models/{}.pt'.format(name))
return model
def metrics(results, truths):
preds = results.cpu().detach().numpy()
truth = truths.cpu().detach().numpy()
preds = np.where(preds > 0.5, 1, 0)
truth = np.where(truth > 0.5, 1, 0)
f_score_micro = f1_score(truth, preds, average='micro')
f_score_macro = f1_score(truth, preds, average='macro')
    accuracy = accuracy_score(truth, preds)
    precision = precision_score(truth, preds)
    recall = recall_score(truth, preds)
    return accuracy, f_score_micro, f_score_macro, precision, recall
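# Usage sketch (hypothetical tensors): `metrics` expects raw sigmoid outputs and
# {0, 1} labels as torch tensors, e.g.
#
#   results = torch.tensor([0.9, 0.2, 0.7, 0.4])
#   truths = torch.tensor([1.0, 0.0, 1.0, 1.0])
#   acc, f1_micro, f1_macro, prec, rec = metrics(results, truths)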
def multiclass_acc(results, truths):
preds = results.view(-1).cpu().detach().numpy()
truth = truths.view(-1).cpu().detach().numpy()
preds = np.where(preds > 0.5, 1, 0)
    truth = np.where(truth > 0.5, 1, 0)
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from functools import partial
from .. import lombscargle
ALL_METHODS = ['auto', 'slow', 'fast', 'scipy', 'chi2', 'fastchi2']
BIAS_METHODS = ['auto', 'slow', 'fast', 'chi2', 'fastchi2']
NTERMS_METHODS = ['auto', 'chi2', 'fastchi2']
FAST_METHODS = ['fast', 'fastchi2']
@pytest.fixture
def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
"""Generate some data for testing"""
rng = np.random.RandomState(rseed)
t = 20 * period * rng.rand(N)
omega = 2 * np.pi / period
y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
dy = dy * (0.5 + rng.rand(N))
y += dy * rng.randn(N)
return t, y, dy
@pytest.mark.parametrize('method', ALL_METHODS)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_bias', [True, False])
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('normalization', ['normalized', 'unnormalized'])
def test_lombscargle_common(method, center_data, fit_bias,
with_errors, normalization, data):
if fit_bias and method not in BIAS_METHODS:
return
if with_errors and method == 'scipy':
return
t, y, dy = data
if not with_errors:
dy = None
freq = 0.8 + 0.01 * np.arange(40)
kwds = dict(normalization=normalization,
center_data=center_data,
fit_bias=fit_bias)
expected_output = lombscargle(t, y, dy, frequency=freq, **kwds)
# don't test fast fft methods here
if 'fast' in method:
kwds['method_kwds'] = dict(use_fft=False)
# check that output matches that of the "auto" method
output = lombscargle(t, y, dy, frequency=freq, method=method, **kwds)
assert_allclose(output, expected_output, rtol=1E-7, atol=1E-20)
# check that output of dy=None matches output of dy=[array of ones]
output_dy_None = lombscargle(t, y, dy=None,
frequency=freq, method=method, **kwds)
output_dy_ones = lombscargle(t, y, dy=np.ones_like(t),
frequency=freq, method=method, **kwds)
assert_allclose(output_dy_None, output_dy_ones)
@pytest.mark.parametrize('method', NTERMS_METHODS)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_bias', [True, False])
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('nterms', range(5))
@pytest.mark.parametrize('normalization', ['normalized', 'unnormalized'])
def test_lombscargle_nterms(method, center_data, fit_bias, with_errors, nterms,
normalization, data):
t, y, dy = data
if not with_errors:
dy = None
    freq = 0.8 + 0.01 * np.arange(40)
"""
Script to do SVD on the covariance matrix of the voxel by time matrix.
Run with:
python pca_script.py
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.linalg as npl
import nibabel as nib
import os
import sys
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
# Relative paths to project and data.
project_path = "../../../"
path_to_data = project_path+"data/ds009/"
location_of_images = project_path+"images/"
location_of_functions = project_path+"code/utils/functions/"
behav_suffix = "/behav/task001_run001/behavdata.txt"
sys.path.append(location_of_functions)
from Image_Visualizing import make_mask
# List of subject directories.
sub_list = os.listdir(path_to_data)
sub_list = [i for i in sub_list if 'sub' in i]
# Initialize array to store variance proportions.
masked_var_array = np.zeros((10, len(sub_list)))
# Loop through all the subjects.
for j in range(len(sub_list)):
name = sub_list[j]
# amount of beginning TRs not standardized at 6
behav=pd.read_table(path_to_data+name+behav_suffix,sep=" ")
num_TR = float(behav["NumTRs"])
# Load image data.
img = nib.load(path_to_data+ name+ "/BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
data = data.astype(float)
# Load mask.
mask = nib.load(path_to_data+ name+'/anatomy/inplane001_brain_mask.nii.gz')
mask_data = mask.get_data()
# Drop the appropriate number of volumes from the beginning.
first_n_vols=data.shape[-1]
num_TR_cut=int(first_n_vols-num_TR)
data = data[...,num_TR_cut:]
# Now fit a mask to the 3-d image for each time point.
my_mask = np.zeros(data.shape)
for i in range(my_mask.shape[-1]):
my_mask[...,i] = make_mask(data[...,i], mask_data, fit=True)
# Reshape stuff to 2-d (voxel by time) and mask the data.
# This should cut down the number of volumes by more than 50%.
my_mask_2d = my_mask.reshape((-1,my_mask.shape[-1]))
data_2d = data.reshape((-1,data.shape[-1]))
masked_data_2d = data_2d[my_mask_2d.sum(1) != 0,:]
# Subtract means over voxels (columns).
data_2d = data_2d - np.mean(data_2d, 0)
    masked_data_2d = masked_data_2d - np.mean(masked_data_2d, 0)
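    # Hedged sketch (assumed from the module docstring, not part of the original
    # source): SVD of the time-by-time covariance of the masked voxel-by-time
    # matrix, storing the proportion of variance of the first 10 components.
    U, S, VT = npl.svd(masked_data_2d.T.dot(masked_data_2d))
    masked_var_array[:, j] = S[:10] / np.sum(S)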
import abc
import pathlib
import hashlib
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
class MapGeometry(abc.ABC):
def __init__(self, size):
self._size = size
self._separations = None
@property
def size(self):
"""The flattened size of this map.
"""
return self._size
@property
def separations(self):
"""The matrix of pairwise separations between map nodes.
Uses lazy evaluation. The matrix might be sparse.
"""
if self._separations is None:
self._separations = self._calculate_separations()
return self._separations
@abc.abstractmethod
def _calculate_separations(self):
raise NotImplementedError()
@abc.abstractmethod
def plot(self, values, ax=None, **kwargs):
"""Plot a representation of the specified values for this map.
Parameters
----------
values : array
1D array of length :attr:`size`
ax : matplotlib axis or None
Plot axis to use, or create a default axis when None.
kwargs : dict
Additional plot keyword arguments to pass to the implementation.
"""
raise NotImplementedError()
class Grid(MapGeometry):
def __init__(self, *signature, metric='L2'):
"""Create a rectilinear grid map geometry.
The grid shape is specified by the absolute values of the signature.
A negative value in the signature indicates that distances along the
corresponding axis are calculated using wrap around.
"""
shape = [abs(k) for k in signature]
x = np.empty(shape=shape)
super(Grid, self).__init__(x.size)
self._shape = x.shape
self._wrap = [k < 0 for k in signature]
if metric not in ('L0', 'L1', 'L2'):
            raise ValueError(f'Invalid metric "{metric}", should be one of L0, L1, L2.')
self._metric = metric
@property
def shape(self):
return self._shape
@property
def wrap(self):
return self._wrap
@property
def metric(self):
return self._metric
def _calculate_separations(self):
ndim = len(self.shape)
separation = np.zeros(self.shape + self.shape)
# Loop over axes.
for k, wrapk in enumerate(self._wrap):
nk = self.shape[k]
xk = np.arange(nk)
            # Calculate the (nk, nk) matrix of absolute integer separations along the k-th axis.
dxk = np.abs(xk.reshape(nk, 1) - xk)
if wrapk:
W = dxk > nk // 2
dxk[W] *= -1
dxk[W] += nk
# Add this axis' contribution to the separation matrix.
S = [(nk if (j % ndim == k) else 1) for j in range(2 * ndim)]
dxk = dxk.reshape(S)
if self._metric == 'L2':
separation += dxk ** 2
elif self._metric == 'L1':
separation += dxk
elif self._metric == 'L0':
separation = np.maximum(separation, dxk)
if self._metric == 'L2':
# Take sqrt in place.
np.sqrt(separation, out=separation)
return separation.reshape(self.size, self.size)
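    # Example (sketch): Grid(-4), a wrapped 1-D ring of 4 nodes with the L2
    # metric, gives separations[0] == [0, 1, 2, 1]; node 3 is one step from
    # node 0 because distances wrap around.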
def plot(self, values, ax=None, **kwargs):
"""Plot an image of the input values.
If the grid dimension is > 2, show a slice in the largest
2 dimensions.
"""
ndim = len(self.shape)
if ndim == 1:
# Plot a single row of values.
values = values.reshape(1, self.size)
elif ndim > 2:
# Plot a slice in the largest 2 dimensions.
largest = np.argsort(self.shape)[-2:]
            idx = tuple(slice(None) if k in largest else 1 for k in range(ndim))
            values = values[idx]
ax = ax or plt.gca()
ny, nx = values.shape
ax.imshow(values, interpolation='none', origin='lower',
extent=[-0.5, nx - 0.5, -0.5, ny - 0.5], **kwargs)
ax.axis('off')
def table_to_array(data):
colnames = data.colnames
# Doesn't work when data is a single row
data_arr = np.zeros((len(data),len(colnames)))
for k, name in enumerate(colnames):
data_arr[:,k] = data[name]
return(data_arr)
def get_signature(data, mapsize, maxiter):
"""Build a ~unique signature for the numpy array X suitable for cache file names.
"""
signature = np.concatenate((data.shape, np.argmin(data, axis=0), np.argmax(data, axis=0), (mapsize, maxiter)))
return hashlib.sha256(signature).hexdigest()
class SelfOrganizingMap(object):
def __init__(self, mapgeom):
self._mapgeom = mapgeom
def find_bmu(self, data, return_distances=False):
# Calculate best-matching cell for all inputs simultaneously:
if len(data.shape) > 1:
dx = data[:,:,np.newaxis] - self._weights
distsq = np.sum(dx ** 2, axis=1)
bmu = np.argmin(distsq, axis=1)
return(bmu)
elif len(data.shape) == 1:
# Calculate best-matching cell for a single input:
dx = data.reshape(-1, 1) - self._weights
distsq = np.sum(dx ** 2, axis=0)
bmu = np.argmin(distsq)
if return_distances: return(bmu, dx, distsq)
else: return(bmu)
def fit(self, data, maxiter=100, eta=0.5, init='random', seed=123, somz=False, verbose=False, save=pathlib.Path.cwd()):
sig = get_signature(data, self._mapgeom.size, maxiter)
pname_weights = save / pathlib.Path(f'trained_weights_{sig}.npy')
pname_loss = save / pathlib.Path(f'loss_{sig}.npy')
if np.logical_and(pname_weights.exists(), pname_loss.exists()):
self._weights = np.load(pname_weights).T
self._loss = np.load(pname_loss)
else:
rng = np.random.RandomState(seed)
self.data = data
# Reformat data if not a numpy array.
if type(self.data) is np.ndarray:
pass
else:
self.data = table_to_array(self.data)
N, D = self.data.shape
# Store loss values for every epoch.
self._loss = np.empty(maxiter)
if init == 'random':
sigmas = np.std(self.data, axis=0)
if somz:
self._weights = (rng.rand(D, self._mapgeom.size)) + data[0][0]
else:
self._weights = sigmas.reshape(-1, 1) * rng.normal(size=(D, self._mapgeom.size))
else:
raise ValueError('Invalid init "{}".'.format(init))
if somz:
print('Running SOMz mode...')
tt = 0
sigma0 = np.max(self._mapgeom.separations)
sigma_single = np.min(self._mapgeom.separations[np.where(self._mapgeom.separations > 0.)])
aps = 0.8
ape = 0.5
nt = maxiter * N
for it in range(maxiter):
loss = 0.
alpha = aps * (ape / aps) ** (tt / nt)
sigma = sigma0 * (sigma_single / sigma0) ** (tt / nt)
index_random = rng.choice(N, N, replace=False)
for i in range(N):
tt += 1
inputs = self.data[index_random[i]]
best = self.find_bmu(inputs)
h = np.exp(-(self._mapgeom.separations[best] ** 2) / sigma ** 2)
dx = inputs.reshape(-1, 1) - self._weights
loss += np.sqrt(np.sum(dx ** 2, axis=0))[best]
self._weights += alpha * h * dx
self._loss[it] = loss
print('Just finished iter = {}'.format(it))
else:
# Randomize data
rndm = rng.choice(np.arange(N), size=N, replace=False)
data = self.data[rndm]
# Calculate mean separation between grid points as a representative large scale.
large_scale = np.mean(self._mapgeom.separations)
for i in range(maxiter):
loss = 0.
learn_rate = eta ** (i / maxiter)
gauss_width = large_scale ** (1 - i / maxiter)
for j, x in enumerate(data):
# Calculate the Euclidean data-space distance squared between x and
# each map site's weight vector.
bmu, dx, distsq = self.find_bmu(x, return_distances=True)
# The loss is the sum of smallest (data space) distances for each data point.
loss += np.sqrt(distsq[bmu])
# Update all weights (dz are map-space distances).
dz = self._mapgeom.separations[bmu]
self._weights += learn_rate * np.exp(-0.5 * (dz / gauss_width) ** 2) * dx
self._loss[i] = loss
# Save trained SOM cell elements
np.save(pname_weights, self._weights.T)
np.save(pname_loss, self._loss)
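    # Minimal usage sketch (hypothetical data): train a 10x10 SOM on random
    # 3-feature vectors, then map labelled data onto it and predict:
    #
    #   som = SelfOrganizingMap(Grid(10, 10))
    #   som.fit(np.random.rand(1000, 3), maxiter=20)
    #   som.map(train_features, train_target)
    #   preds = som.map_to_som(test_features)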
def map(self, data, target):
## TO DO: need to handle empty cells.
## Find cell each training vector belongs to
self._indices = self.find_bmu(data)
## Get distribution of feature values for each cell
self._feature_dist = [data[self._indices == i] for i in range(self._mapgeom.size)]
self._target_dist = [target[self._indices == i] for i in range(self._mapgeom.size)]
## Should be mean or median?
self._target_vals = [np.mean(self._target_dist[i]) for i in range(self._mapgeom.size)]
self._target_pred = np.array(self._target_vals)[self._indices]
# Determine frequency of each index on SOM resolution grid
self._counts = np.bincount(self._indices, minlength=(self._mapgeom.size))
def plot_u_matrix(self, save=None):
'''
Visualize the weights in two dimensions.
* Add option to interpolate onto finer grid
From p. 337 of this paper https://link.springer.com/content/pdf/10.1007%2F978-3-642-15381-5.pdf'''
        rows, cols = self._mapgeom.shape
        # weights are stored as (features, cells); reshape to (rows, cols, features)
        # so that neighbouring cells can be compared directly.
        weights = self._weights.T.reshape(rows, cols, -1)
        u_matrix = np.empty((rows, cols))
        for i in range(rows):
            for j in range(cols):
                dist = 0
                ## neighbor above
                if i < rows - 1:
                    dist += np.sqrt(np.sum((weights[i, j] - weights[i + 1, j]) ** 2))
                ## neighbor below
                if i > 0:
                    dist += np.sqrt(np.sum((weights[i, j] - weights[i - 1, j]) ** 2))
                ## neighbor left
                if j > 0:
                    dist += np.sqrt(np.sum((weights[i, j] - weights[i, j - 1]) ** 2))
                ## neighbor right
                if j < cols - 1:
                    dist += np.sqrt(np.sum((weights[i, j] - weights[i, j + 1]) ** 2))
                u_matrix[i, j] = dist
        plt.figure(figsize=(10, 7))
        plt.imshow(u_matrix, interpolation='none', origin='lower', cmap='viridis')
        if save:
            plt.savefig(save)
        plt.show()
def plot_rgb(self, features=None, save=None):
'''Visualize the weights on an RGB scale using only three features.
If features isn't specified, then the first three features are used.
Inputs
------
features: List of indices for each feature to include in the map.'''
rows, cols = self._mapgeom.shape
weights = self._weights.T
# Normalize weights to be between [0,1]
weights = (weights - weights.min(axis=0)) / (weights.max(axis=0) - weights.min(axis=0))
# Select features to show in RGB map
if features:
rgb = weights[:,features]
else:
rgb = weights[:,:3]
rgb_map = rgb.reshape(rows, cols, 3)
plt.imshow(rgb_map, interpolation='none', origin='lower', cmap='viridis')
if save:
plt.savefig(save)
plt.show()
def map_to_som(self, data):
'''Takes input data of shape (N, features) and returns the predicted redshifts.'''
# Reformat data if not a numpy array.
if type(data) is np.ndarray:
pass
else:
data = table_to_array(data)
## Calculate distance between data weights and SOM weights to find
## best-matching cell for each input vector.
best = self.find_bmu(data)
## Mean redshift per cell
vals = np.array(self._target_vals)
return(vals[best])
def plot_counts_per_cell(self, norm=None, save=None):
'''Plot number of data points mapped to each SOM cell.'''
counts = self._counts.reshape(self._mapgeom.shape)
plt.figure(figsize=(10,7))
plt.imshow(counts, origin='lower', interpolation='none',
cmap='viridis', norm=norm)
plt.colorbar()
plt.title('Number per SOM cell')
if save:
plt.savefig(save)
plt.show()
def plot_statistic(self, feature=None, statistic=np.nanmean, return_stat=False, save=None):
## To do: handle empty cells
if feature:
fig, axs = plt.subplots(1,2, figsize=(12,5))
axs = axs.ravel()
# Plot statistic of feature per cell
stat = np.asarray([statistic(self._feature_dist[i][:,feature]) for i in range(self._mapgeom.size)])
im0 = axs[0].imshow(stat.reshape(self._mapgeom.shape), origin='lower', interpolation='none', cmap='viridis')
fig.colorbar(im0, ax=axs[0])
# Plot statistic of difference between feature weights and node weights per cell
diff = np.asarray([statistic(self._feature_dist[i] - self._weights.T[i], axis=0)[feature] for i in range(self._mapgeom.size)])
im1 = axs[1].imshow(diff.reshape(self._mapgeom.shape), origin='lower', interpolation='none', cmap='viridis')
fig.colorbar(im1, ax=axs[1])
if save:
plt.savefig(save)
plt.show()
else:
stat = np.asarray([statistic(self._target_dist[i]) for i in range(self._mapgeom.size)])
plt.figure(figsize=(10,7))
plt.imshow(stat.reshape(self._mapgeom.shape), origin='lower', interpolation='none', cmap='viridis')
plt.colorbar()
if save:
plt.savefig(save)
plt.show()
if return_stat:
return(stat)
def build_density(self, data, target, nbins=50):
bins = np.linspace(0, 3, nbins + 1)
density = np.zeros((nbins, nbins))
train_dist = self._target_dist
test_dat = table_to_array(data)
best = self.find_bmu(test_dat)
test_dist = [target[best == i] for i in range(self._mapgeom.size)]
for cell, dist in enumerate(train_dist):
if dist.size == 0:
pass
else:
test_hist, _ = np.histogram(test_dist[cell], bins)
train_rho, _ = np.histogram(dist, bins, density=True)
for zbin, nz in enumerate(test_hist):
density[:, zbin] += nz * train_rho
return(density)
def plot_sed(self, table, cell, save=None):
colnames = []
for col in table.colnames:
if 'sed' in col:
colnames.append(col)
in_cell = table[self._indices == cell]
if len(in_cell) == 0:
return('No galaxies were mapped to this cell.')
        # no module-level RNG is defined in this scope, so use numpy's default
        rnd = np.random.choice(len(in_cell), size=1)
        sed = in_cell[rnd]
        plt.figure(figsize=(10, 7))
wlen = np.empty(len(colnames))
mags = np.empty(len(colnames))
for k, sed_col in enumerate(colnames):
to_jy = 1 / (4.4659e13 / (8.4 ** 2))
jy = sed[sed_col] * to_jy
ab = -2.5 * np.log10(jy / 3631)
start, width = colnames[k].split('_')[1:]
start, width = int(start), int(width)
wlen[k] = (start + (start + width)) / 2 # angstroms
mags[k] = ab
x = cell % np.abs(self._mapgeom.shape[0])
        y = cell // np.abs(self._mapgeom.shape[1])
# @Author: <NAME> <gio>
# @Date: 10-Aug-2021
# @Email: <EMAIL>
# @Project: FeARLesS
# @Filename: 01_figurePositions.py
# @Last modified by: gio
# @Last modified time: 11-Aug-2021
# @License: MIT
import pandas as pd
import os
import tqdm
import struct
import matplotlib.pyplot as plt
import numpy as np
from itertools import product
from skimage.io import imread, imsave
####################################
# =============================================================================
# ### mac gio
# # path = "/Volumes/sharpe/data/Vascular_micromass/Opera/TIMELAPSE/" "Timelapse4_041021/"
# # folder_raw = os.path.join(path)
# =============================================================================
# ### linux gio
path = "/g/sharpe/data/Vascular_micromass/Opera/TIMELAPSE/" "Timelapse4_041021/"
folder_raw = os.path.join(path)
# =============================================================================
# ### windows nicola
# path = os.path.join(
# "data", "Vascular_micromass", "Opera", "TIMELAPSE", "Timelapse4_041021"
# )
# folder_raw = os.path.join("X:", os.sep, path)
# folder_raw = os.path.join('')
# exp_folder = os.path.join(
# "meta"
# )
# =============================================================================
exp_folder = os.path.join(
"gio_Pecam-Sox9_20x-24h_041021__2021-10-04T16_06_44-Measurement_1"
)
# name of wells/slides - correspond to columns row/col in the metadata.csv file
well_names = ["well1","well2"]
### how many samples there are in the same well (in the row and col direction)
# should be the same size as slide_names
n_cols = [
1, # well1
1 # well2
]
n_rows = [
1, # well1
1 # well2
]
# name of samples in every well
samplesNames = [
["A01"], # well1
['A02'] # well2
]
channel_list = [2, 1] # same for all the wells
luts_name = ["gray", "green"] # same for all the wells
####################################
def imagej_metadata_tags(metadata, byteorder):
"""Return IJMetadata and IJMetadataByteCounts tags from metadata dict.
The tags can be passed to the TiffWriter.save function as extratags.
"""
header = [{">": b"IJIJ", "<": b"JIJI"}[byteorder]]
bytecounts = [0]
body = []
def writestring(data, byteorder):
return data.encode("utf-16" + {">": "be", "<": "le"}[byteorder])
def writedoubles(data, byteorder):
return struct.pack(byteorder + ("d" * len(data)), *data)
def writebytes(data, byteorder):
return data.tobytes()
metadata_types = (
("Info", b"info", 1, writestring),
("Labels", b"labl", None, writestring),
("Ranges", b"rang", 1, writedoubles),
("LUTs", b"luts", None, writebytes),
("Plot", b"plot", 1, writebytes),
("ROI", b"roi ", 1, writebytes),
("Overlays", b"over", None, writebytes),
)
for key, mtype, count, func in metadata_types:
if key not in metadata:
continue
if byteorder == "<":
mtype = mtype[::-1]
values = metadata[key]
if count is None:
count = len(values)
else:
values = [values]
header.append(mtype + struct.pack(byteorder + "I", count))
for value in values:
data = func(value, byteorder)
body.append(data)
bytecounts.append(len(data))
body = b"".join(body)
header = b"".join(header)
data = header + body
bytecounts[0] = len(header)
bytecounts = struct.pack(byteorder + ("I" * len(bytecounts)), *bytecounts)
return (
(50839, "B", len(data), data, True),
(50838, "I", len(bytecounts) // 4, bytecounts, True),
)
def make_lut():
# generate LUT for primary and secondary colors
# Intensity value range
val_range = np.arange(256, dtype=np.uint8)
luts_dict = {}
# Gray LUT
luts_dict["gray"] = np.stack([val_range, val_range, val_range])
# Red LUT
luts_dict["red"] = np.zeros((3, 256), dtype=np.uint8)
luts_dict["red"][0, :] = val_range
# Green LUT
luts_dict["green"] = np.zeros((3, 256), dtype=np.uint8)
luts_dict["green"][1, :] = val_range
# Blue LUT
luts_dict["blue"] = np.zeros((3, 256), dtype=np.uint8)
luts_dict["blue"][2, :] = val_range
# Magenta LUT
luts_dict["magenta"] = np.zeros((3, 256), dtype=np.uint8)
luts_dict["magenta"][0, :] = val_range
luts_dict["magenta"][2, :] = val_range
# Cyan LUT
luts_dict["cyan"] = np.zeros((3, 256), dtype=np.uint8)
luts_dict["cyan"][1, :] = val_range
luts_dict["cyan"][2, :] = val_range
# Yellow LUT
luts_dict["yellow"] = np.zeros((3, 256), dtype=np.uint8)
luts_dict["yellow"][0, :] = val_range
luts_dict["yellow"][1, :] = val_range
# Orange LUT
luts_dict["orange"] = np.zeros((3, 256), dtype=np.uint8)
luts_dict["orange"][0, :] = val_range
luts_dict["orange"][1, :] = (165.0 * val_range / 256.0).astype(np.uint8)
# Maroon LUT
luts_dict["maroon"] = np.zeros((3, 256), dtype=np.uint8)
luts_dict["maroon"][0, :] = (128.0 * val_range / 256.0).astype(np.uint8)
return luts_dict
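# Usage sketch (assumes the separate `tifffile` package, not the skimage
# `imsave` imported above): the LUTs and the IJMetadata extratags are combined
# when writing an ImageJ hyperstack, e.g.
#
#   import tifffile
#   luts_dict = make_lut()
#   ijtags = imagej_metadata_tags({"LUTs": [luts_dict[n] for n in luts_name]}, ">")
#   tifffile.imsave("stack.tif", stack, byteorder=">", imagej=True,
#                   metadata={"mode": "composite"}, extratags=ijtags)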
########################################
df = pd.read_csv(os.path.join(folder_raw, exp_folder, "metadata1.csv"))
print(df.head())
wells = df.drop_duplicates(subset=['row','col'])
wells = [[i.row,i.col] for j,i in wells.iterrows()]
s = 0
for well, well_name in zip(wells, well_names):
df_well = df[(df.row == well[0])&(df.col == well[1])]
xpos = list(set(df_well.Xpos))
xpos.sort()
ypos = list(set(df_well.Ypos))
ypos.sort()
if n_cols[s] == 1:
xlims = [
np.min(xpos),
np.max(xpos),
]
elif n_cols[s] == 2:
xlims = [
np.min(xpos),
(np.min(xpos) + np.max(xpos)) / 2,
np.max(xpos),
]
elif n_cols[s] == 3:
xlims = [
np.min(xpos),
(np.min(xpos) + (np.min(xpos) + np.max(xpos)) / 2) / 2,
(np.min(xpos) + np.max(xpos)) / 2,
(np.max(xpos) + (np.min(xpos) + np.max(xpos)) / 2) / 2,
np.max(xpos),
]
if n_rows[s] == 1:
ylims = [
np.min(ypos),
np.max(ypos),
]
elif n_rows[s] == 2:
        ylims = [
            np.min(ypos),
            (np.min(ypos) + np.max(ypos)) / 2,
            np.max(ypos),
        ]
import numpy as np
from corpus import thai_words
from abc import ABC, abstractmethod
from typing import List, Tuple, Union, NewType
from summarization.utils import sentence_segment as sent_seg
from summarization.utils import word_tokenize as tokenize
from summarization.utils import stopwords, get_stem
SentenceVector = NewType('SentenceVector', type(np.array([])))
TFVector = NewType('TFVector', type(np.array([])))
IDFVector = NewType('IDFVector', type(np.array([])))
WeightedWordVector = NewType('WeightedWordVector', type(np.array([])))
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import time
import json
import numpy as np
import numba as nb
from enum import Enum
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.Core.snapshot_pf_data import compile_snapshot_circuit
from GridCal.Engine.Simulations.LinearFactors.linear_analysis import LinearAnalysis, make_worst_contingency_transfer_limits
from GridCal.Engine.Simulations.driver_types import SimulationTypes
from GridCal.Engine.Simulations.result_types import ResultTypes
from GridCal.Engine.Simulations.results_table import ResultsTable
from GridCal.Engine.Simulations.results_template import ResultsTemplate
from GridCal.Engine.Simulations.driver_template import DriverTemplate
########################################################################################################################
# Available Transfer Capacity classes
########################################################################################################################
class AvailableTransferMode(Enum):
Generation = 0
InstalledPower = 1
Load = 2
GenerationAndLoad = 3
@nb.njit()
def compute_alpha(ptdf, P0, Pinstalled, idx1, idx2, bus_types, dT=1.0, mode=0):
"""
Compute all lines' ATC
:param ptdf: Power transfer distribution factors (n-branch, n-bus)
:param P0: all bus injections [p.u.]
:param idx1: bus indices of the sending region
:param idx2: bus indices of the receiving region
:param bus_types: Array of bus types {1: pq, 2: pv, 3: slack}
:param dT: Exchange amount
:param mode: Type of power shift
0: shift generation based on the current generated power
1: shift generation based on the installed power
2: shift load
3 (or else): shift using generation and load
:return: Exchange sensitivity vector for all the lines
"""
nbr = ptdf.shape[0]
nbus = ptdf.shape[1]
# declare the bus injections increment due to the transference
dP = np.zeros(nbus)
if mode == 0: # move the generators based on the generated power --------------------
# set the sending power increment proportional to the current power (Area 1)
n1 = 0.0
for i in idx1:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
n1 += P0[i]
for i in idx1:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
dP[i] = dT * P0[i] / abs(n1)
# set the receiving power increment proportional to the current power (Area 2)
n2 = 0.0
for i in idx2:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
n2 += P0[i]
for i in idx2:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
dP[i] = -dT * P0[i] / abs(n2)
elif mode == 1: # move the generators based on the installed power --------------------
# set the sending power increment proportional to the current power (Area 1)
n1 = 0.0
for i in idx1:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
n1 += Pinstalled[i]
for i in idx1:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
dP[i] = dT * Pinstalled[i] / abs(n1)
# set the receiving power increment proportional to the current power (Area 2)
n2 = 0.0
for i in idx2:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
n2 += Pinstalled[i]
for i in idx2:
if bus_types[i] == 2 or bus_types[i] == 3: # it is a PV or slack node
dP[i] = -dT * Pinstalled[i] / abs(n2)
elif mode == 2: # move the load ------------------------------------------------------
# set the sending power increment proportional to the current power (Area 1)
n1 = 0.0
        for i in idx1:
            if bus_types[i] == 1:  # it is a PQ (load) node
                n1 += P0[i]
        for i in idx1:
            if bus_types[i] == 1:  # it is a PQ (load) node
                dP[i] = dT * P0[i] / abs(n1)
        # set the receiving power increment proportional to the current power (Area 2)
        n2 = 0.0
        for i in idx2:
            if bus_types[i] == 1:  # it is a PQ (load) node
                n2 += P0[i]
        for i in idx2:
            if bus_types[i] == 1:  # it is a PQ (load) node
                dP[i] = -dT * P0[i] / abs(n2)
else: # move all of it -----------------------------------------------------------------
# set the sending power increment proportional to the current power
n1 = 0.0
for i in idx1:
n1 += P0[i]
for i in idx1:
dP[i] = dT * P0[i] / abs(n1)
# set the receiving power increment proportional to the current power
n2 = 0.0
for i in idx2:
n2 += P0[i]
for i in idx2:
dP[i] = -dT * P0[i] / abs(n2)
# ----------------------------------------------------------------------------------------
# compute the line flow increments due to the exchange increment dT in MW
dflow = ptdf.dot(dP)
# compute the sensitivity
alpha = dflow / dT
return alpha
@nb.njit()
def compute_atc(br_idx, ptdf, lodf, alpha, flows, rates, contingency_rates, threshold=0.005):
"""
Compute all lines' ATC
:param br_idx: array of branch indices to analyze
:param ptdf: Power transfer distribution factors (n-branch, n-bus)
:param lodf: Line outage distribution factors (n-branch, n-outage branch)
:param alpha: Branch sensitivities to the exchange [p.u.]
:param flows: branches power injected at the "from" side [MW]
:param rates: all branches rates vector
:param contingency_rates: all branches contingency rates vector
:param threshold: value that determines if a line is studied for the ATC calculation
:return:
beta_mat: Matrix of beta values (branch, contingency_branch)
beta: vector of actual beta value used for each branch (n-branch)
atc_n: vector of ATC values in "N" (n-branch)
atc_final: vector of ATC in "N" or "N-1" whatever is more limiting (n-branch)
atc_limiting_contingency_branch: most limiting contingency branch index vector (n-branch)
atc_limiting_contingency_flow: most limiting contingency flow vector (n-branch)
"""
nbr = len(br_idx)
# explore the ATC
atc_n = np.zeros(nbr)
atc_mc = np.zeros(nbr)
atc_final = np.zeros(nbr)
beta_mat = np.zeros((nbr, nbr))
beta_used = np.zeros(nbr)
atc_limiting_contingency_branch = np.zeros(nbr)
atc_limiting_contingency_flow = np.zeros(nbr)
# processed = list()
# mm = 0
for im, m in enumerate(br_idx): # for each branch
if abs(alpha[m]) > threshold and abs(flows[m]) < rates[m]: # if the branch is relevant enough for the ATC...
# compute the ATC in "N"
if alpha[m] == 0:
atc_final[im] = np.inf
elif alpha[m] > 0:
atc_final[im] = (rates[m] - flows[m]) / alpha[m]
else:
atc_final[im] = (-rates[m] - flows[m]) / alpha[m]
# remember the ATC in "N"
atc_n[im] = atc_final[im]
# set to the current branch, since we don't know if there will be any contingency that make the ATC worse
atc_limiting_contingency_branch[im] = m
# explore the ATC in "N-1"
for ic, c in enumerate(br_idx): # for each contingency
# compute the exchange sensitivity in contingency conditions
beta_mat[im, ic] = alpha[m] + lodf[m, c] * alpha[c]
if m != c:
# compute the contingency flow
contingency_flow = flows[m] + lodf[m, c] * flows[c]
# set the default values (worst contingency by itself, not comparing with the base situation)
if abs(contingency_flow) > abs(atc_limiting_contingency_flow[im]):
atc_limiting_contingency_flow[im] = contingency_flow # default
atc_limiting_contingency_branch[im] = c
# now here, do compare with the base situation
if abs(beta_mat[im, ic]) > threshold and abs(contingency_flow) <= contingency_rates[m]:
# compute the ATC in "N-1"
if beta_mat[im, ic] == 0:
atc_mc[im] = np.inf
elif beta_mat[im, ic] > 0:
atc_mc[im] = (contingency_rates[m] - contingency_flow) / beta_mat[im, ic]
else:
atc_mc[im] = (-contingency_rates[m] - contingency_flow) / beta_mat[im, ic]
# refine the ATC to the most restrictive value every time
if abs(atc_mc[im]) < abs(atc_final[im]):
atc_final[im] = atc_mc[im]
beta_used[im] = beta_mat[im, ic]
atc_limiting_contingency_flow[im] = contingency_flow
atc_limiting_contingency_branch[im] = c
return beta_mat, beta_used, atc_n, atc_mc, atc_final, atc_limiting_contingency_branch, atc_limiting_contingency_flow
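# Usage sketch (hypothetical inputs): given PTDF/LODF matrices from a linear
# (DC) analysis, the exchange sensitivities and ATC values follow as
#
#   alpha = compute_alpha(ptdf, P0, Pinstalled, idx_from, idx_to, bus_types,
#                         dT=100.0, mode=AvailableTransferMode.Generation.value)
#   results = compute_atc(br_idx, ptdf, lodf, alpha, flows, rates,
#                         contingency_rates, threshold=0.02)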
class AvailableTransferCapacityResults(ResultsTemplate):
def __init__(self, n_bus, br_names, bus_names, bus_types, bus_idx_from, bus_idx_to, br_idx):
"""
:param n_bus:
:param br_names:
:param bus_names:
:param bus_types:
:param bus_idx_from:
:param bus_idx_to:
:param br_idx:
"""
ResultsTemplate.__init__(self,
name='ATC Results',
available_results=[ResultTypes.AvailableTransferCapacity,
ResultTypes.NetTransferCapacity,
ResultTypes.AvailableTransferCapacityN,
ResultTypes.AvailableTransferCapacityAlpha,
ResultTypes.AvailableTransferCapacityBeta,
ResultTypes.AvailableTransferCapacityReport
],
data_variables=['alpha',
'beta_mat',
'beta',
'atc',
'atc_n',
'atc_limiting_contingency_branch',
'atc_limiting_contingency_flow',
'base_flow',
'rates',
'contingency_rates',
'report',
'report_headers',
'report_indices',
'branch_names',
'bus_names',
'bus_types',
'bus_idx_from',
'bus_idx_to',
'br_idx'])
self.n_br = len(br_idx)
self.n_bus = n_bus
self.branch_names = np.array(br_names, dtype=object)
self.bus_names = bus_names
self.bus_types = bus_types
self.bus_idx_from = bus_idx_from
self.bus_idx_to = bus_idx_to
self.br_idx = br_idx
# stores the worst transfer capacities (from to) and (to from)
self.rates = np.zeros(self.n_br)
self.contingency_rates = np.zeros(self.n_br)
self.base_exchange = 0
        self.alpha = np.zeros(self.n_br)
import os
import sys
import numpy as np
import time
import matplotlib.pyplot as plt
import pandas as pd
from utils import *
def sliding_dot_product(q, t):
n = t.size
m = q.size
# Append t with n zeros
ta = np.append(t, np.zeros(n))
# Reverse Q
qr = np.flip(q, 0)
# Append qra
qra = np.append(qr, np.zeros(2 * n - m))
# Compute FFTs
qraf = np.fft.fft(qra)
taf = np.fft.fft(ta)
# Compute the inverse FFT to the element-wise multiplication of qraf and taf
qt = np.fft.ifft(np.multiply(qraf, taf))
return qt[m:n]
def sliding_dot_product_stomp(q, t):
n = t.size
m = q.size
# Append t with n zeros
ta = np.append(t, np.zeros(n))
# Reverse Q
qr = np.flip(q, 0)
# Append qra
qra = np.append(qr, np.zeros(2 * n - m))
# Compute FFTs
qraf = np.fft.fft(qra)
taf = np.fft.fft(ta)
# Compute the inverse FFT to the element-wise multiplication of qraf and taf
qt = np.fft.ifft(np.multiply(qraf, taf))
return qt[m - 1:n]
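# Quick check sketch (random series): the FFT-based result should match a naive
# O(n*m) sliding dot product up to floating-point error, e.g.
#
#   t = np.random.rand(128)
#   q = t[10:20]
#   naive = np.array([np.dot(q, t[i:i + q.size])
#                     for i in range(t.size - q.size + 1)])
#   assert np.allclose(np.real(sliding_dot_product_stomp(q, t)), naive)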
def calculate_distance_profile(q, t, qt, a, sum_q, sum_q2, mean_t, sigma_t):
n = t.size
m = q.size
b = np.zeros(n - m)
    dist = np.zeros(n - m)
import os
import sys
import numpy as np
import cv2
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
from nuscenes2kitti_object import nuscenes2kitti_object
import ipdb
from PIL import Image
def pto_depth_map(velo_points,
H=32, W=256, C=5, dtheta=np.radians(1.33), dphi=np.radians(90. / 256.0)):
"""
Ref:https://github.com/Durant35/SqueezeSeg/blob/master/src/nodes/segment_node.py
Project velodyne points into front view depth map.
:param velo_points: velodyne points in shape [:,4]
:param H: the row num of depth map, could be 64(default), 32, 16
:param W: the col num of depth map
:param C: the channel size of depth map
3 cartesian coordinates (x; y; z),
an intensity measurement and
range r = sqrt(x^2 + y^2 + z^2)
:param dtheta: the delta theta of H, in radian
:param dphi: the delta phi of W, in radian
:return: `depth_map`: the projected depth map of shape[H,W,C]
"""
x, y, z, i = velo_points[:, 1], -velo_points[:, 0], velo_points[:, 2], velo_points[:, 3]
d = np.sqrt(x ** 2 + y ** 2 + z ** 2)
r = np.sqrt(x ** 2 + y ** 2)
d[d == 0] = 0.000001
r[r == 0] = 0.000001
phi = np.radians(45.) - np.arcsin(y / r)
phi_ = (phi / dphi).astype(int)
phi_[phi_ < 0] = 0
phi_[phi_ >= W] = W-1
# print(np.min(phi_))
# print(np.max(phi_))
#
# print z
# print np.radians(2.)
# print np.arcsin(z/d)
theta = np.radians(2.) - np.arcsin(z / d)
# print theta
theta_ = (theta / dtheta).astype(int)
# print theta_
theta_[theta_ < 0] = 0
theta_[theta_ >= H] = H-1
# print theta,phi,theta_.shape,phi_.shape
# print(np.min((phi/dphi)),np.max((phi/dphi)))
# np.savetxt('./dump/'+'phi'+"dump.txt",(phi_).astype(np.float32), fmt="%f")
# np.savetxt('./dump/'+'phi_'+"dump.txt",(phi/dphi).astype(np.float32), fmt="%f")
# print(np.min(theta_))
# print(np.max(theta_))
depth_map = np.zeros((H, W, C))
# 5 channels according to paper
if C == 5:
depth_map[theta_, phi_, 0] = x
depth_map[theta_, phi_, 1] = y
depth_map[theta_, phi_, 2] = z
depth_map[theta_, phi_, 3] = i
depth_map[theta_, phi_, 4] = d
else:
depth_map[theta_, phi_, 0] = i
return depth_map
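# Usage sketch (random points): project an (N, 4) cloud of (x, y, z, intensity)
# rows into a 32 x 256 x 5 front-view depth map.
#
#   pts = np.random.uniform(-20.0, 20.0, size=(1000, 4))
#   pts[:, 3] = np.abs(pts[:, 3]) / 20.0            # intensities in [0, 1]
#   dmap = pto_depth_map(pts, H=32, W=256, C=5)     # -> (32, 256, 5)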
def keep_32(velo_points,
H=64, W=512, C=5, dtheta=np.radians(1.33), dphi=np.radians(90. / 512.0), odd=False,scale=1):
x, y, z= velo_points[:, 0], velo_points[:, 1], velo_points[:, 2]
d = np.sqrt(x ** 2 + y ** 2 + z ** 2)
r = np.sqrt(x ** 2 + y ** 2)
d[d == 0] = 0.000001
r[r == 0] = 0.000001
phi = np.radians(45.) - np.arcsin(y / r)
phi_ = (phi / dphi).astype(int)
phi_[phi_ < 0] = 0
phi_[phi_ >= W] = W-1
    theta = np.radians(2.) - np.arcsin(z / d)
import abc
from typing import Tuple
import numpy as np
from sklearn.metrics import auc
from sklearn.utils.multiclass import type_of_target
_available_uplift_modes = ("qini", "cum_gain", "adj_qini")
class ConstPredictError(Exception):
pass
class TUpliftMetric(metaclass=abc.ABCMeta):
@abc.abstractclassmethod
def __call__(self, y_true: np.ndarray, uplift_pred: np.ndarray, treatment: np.ndarray) -> float:
pass
def perfect_uplift_curve(y_true: np.ndarray, treatment: np.ndarray):
"""Calculate perfect curve
Method return curve's coordinates if the model is a perfect.
Perfect model ranking:
If type if 'y_true' is 'binary':
1) Treatment = 1, Target = 1
2) Treatment = 0, Target = 0
3) Treatment = 1, Target = 0
4) Treatment = 0, Target = 1
If type if 'y_true' is 'continuous':
Not implemented
Args:
y_true: Target values
treatment: Treatment column
Returns:
perfect curve
"""
if type_of_target(y_true) == "continuous" and np.any(y_true < 0.0):
raise Exception("For a continuous target, the perfect curve is only available for non-negative values")
if type_of_target(y_true) == "binary":
perfect_control_score = (treatment == 0).astype(int) * (2 * (y_true != 1).astype(int) - 1)
perfect_treatment_score = (treatment == 1).astype(int) * 2 * (y_true == 1).astype(int)
perfect_uplift = perfect_treatment_score + perfect_control_score
elif type_of_target(y_true) == "continuous":
raise NotImplementedError("Can't calculate perfect curve for continuous target")
else:
raise RuntimeError("Only 'binary' and 'continuous' targets are available")
return perfect_uplift
def _get_uplift_curve(
y_treatment: np.ndarray,
y_control: np.ndarray,
n_treatment: np.ndarray,
n_control: np.ndarray,
mode: str,
):
"""Calculate uplift curve
Args:
y_treatment: Cumulative number of target in treatment group
y_control: Cumulative number of target in control group
        n_treatment: Cumulative number of samples in treatment group
        n_control: Cumulative number of samples in control group
mode: Name of available metrics
Returns:
curve for current mode
"""
assert mode in _available_uplift_modes, "Mode isn't available"
if mode == "qini":
curve_values = y_treatment / n_treatment[-1] - y_control / n_control[-1]
elif mode == "cum_gain":
treatment_target_rate = np.nan_to_num(y_treatment / n_treatment, 0.0)
control_target_rate = np.nan_to_num(y_control / n_control, 0.0)
curve_values = treatment_target_rate - control_target_rate
n_join = n_treatment + n_control
curve_values = curve_values * n_join / n_join[-1]
elif mode == "adj_qini":
normed_factor = np.nan_to_num(n_treatment / n_control, 0.0)
normed_y_control = y_control * normed_factor
curve_values = (y_treatment - normed_y_control) / n_treatment[-1]
return curve_values
def calculate_graphic_uplift_curve(
y_true: np.ndarray,
uplift_pred: np.ndarray,
treatment: np.ndarray,
mode: str = "adj_qini",
) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate uplift curve
Args:
y_true: Target values
        uplift_pred: Prediction of the model
treatment: Treatment column
mode: Name of available metrics
Returns:
xs, ys - curve's coordinates
"""
# assert not np.all(uplift_pred == uplift_pred[0]), "Can't calculate uplift curve for constant predicts"
if np.all(uplift_pred == uplift_pred[0]):
raise ConstPredictError("Can't calculate uplift curve for constant predicts")
if type_of_target(y_true) == "continuous" and np.any(y_true < 0.0):
raise Exception("For a continuous target, the perfect curve is only available for non-negative values")
sorted_indexes = np.argsort(uplift_pred)[::-1]
y_true, uplift_pred, treatment = (
y_true[sorted_indexes],
uplift_pred[sorted_indexes],
treatment[sorted_indexes],
)
indexes = np.where(np.diff(uplift_pred))[0]
indexes = np.insert(indexes, indexes.size, uplift_pred.shape[0] - 1)
n_treatment_samples_cs = np.cumsum(treatment)[indexes].astype(np.int64)
n_join_samples_cs = indexes + 1
n_control_samples_cs = n_join_samples_cs - n_treatment_samples_cs
y_true_control, y_true_treatment = y_true.copy(), y_true.copy()
y_true_control[treatment == 1] = 0
y_true_treatment[treatment == 0] = 0
y_true_control_cs = np.cumsum(y_true_control)[indexes]
y_true_treatment_cs = np.cumsum(y_true_treatment)[indexes]
curve_values = _get_uplift_curve(
y_true_treatment_cs,
y_true_control_cs,
n_treatment_samples_cs,
n_control_samples_cs,
mode,
)
n_join_samples = np.insert(n_join_samples_cs, 0, 0)
curve_values = np.insert(curve_values, 0, 0)
rate_join_samples = n_join_samples / n_join_samples[-1]
return rate_join_samples, curve_values
def calculate_uplift_auc(
y_true: np.ndarray,
uplift_pred: np.ndarray,
treatment: np.ndarray,
mode: str = "adj_qini",
normed: bool = False,
):
"""Calculate area under uplift curve
Args:
y_true: Target values
uplift_pred: Prediction of meta model
treatment: Treatment column
mode: Name of available metrics
normed: Normed AUC: (AUC - MIN_AUC) / (MAX_AUC - MIN_AUC)
Returns:
auc_score: Area under model uplift curve
"""
xs, ys = calculate_graphic_uplift_curve(y_true, uplift_pred, treatment, mode)
uplift_auc = auc(xs, ys)
if normed:
min_auc, max_auc = calculate_min_max_uplift_auc(y_true, treatment, mode)
uplift_auc = (uplift_auc - min_auc) / (max_auc - min_auc)
return uplift_auc
def calculate_min_max_uplift_auc(y_true: np.ndarray, treatment: np.ndarray, mode: str = "adj_qini"):
"""Calculate AUC uplift curve for `base` and `perfect` models
Args:
y_true: Target values
treatment: Treatment column
mode: Name of available metrics
Returns:
auc_base: Area under `base`.
auc_perfect: Area under `perfect` model curve
"""
diff_target_rate = y_true[treatment == 1].mean() - y_true[treatment == 0].mean()
xs_base, ys_base = np.array([0, 1]), np.array([0, diff_target_rate])
perfect_uplift = perfect_uplift_curve(y_true, treatment)
xs_perfect, ys_perfect = calculate_graphic_uplift_curve(y_true, perfect_uplift, treatment, mode)
auc_base = auc(xs_base, ys_base)
auc_perfect = auc(xs_perfect, ys_perfect)
return auc_base, auc_perfect
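# Usage sketch (synthetic data): normalized area under the adjusted-Qini curve
# for random scores on a random binary outcome / treatment assignment.
#
#   y = np.random.randint(0, 2, 1000)
#   trt = np.random.randint(0, 2, 1000)
#   scores = np.random.rand(1000)
#   auc_norm = calculate_uplift_auc(y, scores, trt, mode="adj_qini", normed=True)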
def calculate_uplift_at_top(y_true: np.ndarray, uplift_pred: np.ndarray, treatment: np.ndarray, top: float = 30):
"""Calculate Uplift metric at TOP
Calculate uplift metric at top
Args:
y_true: Target values
uplift_pred: Prediction of meta model
treatment: Treatment column
top: Rate, value between (0, 100]
Returns:
score: Score
"""
# assert not np.all(uplift_pred == uplift_pred[0]), "Can't calculate for constant predicts."
uplift_percentile = np.percentile(uplift_pred, 100 - top)
mask_top = uplift_pred > uplift_percentile
control_true_top = y_true[(treatment == 0) & mask_top].sum()
treatment_true_top = y_true[(treatment == 1) & mask_top].sum()
n_control_samples = (treatment[mask_top] == 0).sum()
n_treatment_samples = (treatment[mask_top] == 1).sum()
mean_control_value = control_true_top / n_control_samples if n_control_samples > 0 else 0.0
mean_treatment_value = treatment_true_top / n_treatment_samples if n_treatment_samples > 0 else 0.0
score = mean_treatment_value - mean_control_value
return score
def calculate_total_score(y_true: np.ndarray, uplift_pred: np.ndarray, treatment: np.ndarray, top: float = 30):
"""Calculate total target
Args:
y_true: Target values
uplift_pred: Prediction of meta model
treatment: Treatment column
top: Rate, value between (0, 100]
Returns:
score: Score
"""
    uplift_percentile = np.percentile(uplift_pred, 100 - top)
import numpy as np
import os
from functools import partial
import logging
# Used to set default value for workers
from multiprocessing import cpu_count
import naturalselection as ns
class NN(ns.Genus):
''' Feedforward fully connected neural network genus.
INPUT:
(int) max_nm_hidden_layers
(bool) uniform_layers: whether all hidden layers should
have the same amount of neurons and dropout
(iterable) input_dropout: values for input dropout
        (iterable) hidden_dropout: values for dropout after hidden layers
        (iterable) neurons: values for neurons in hidden layers
(iterable) optimizer: keras optimizers
(iterable) hidden_activation: keras activation functions
(iterable) batch_size: batch sizes
(iterable) initializer: keras initializers
'''
def __init__(self, max_nm_hidden_layers = 5, uniform_layers = False,
input_dropout = np.arange(0, 0.6, 0.1),
hidden_dropout = np.arange(0, 0.6, 0.1),
neurons = np.array([2 ** n for n in range(4, 11)]),
optimizer = np.array(['adamax', 'adam', 'nadam']),
hidden_activation = np.array(['relu', 'elu']),
batch_size = np.array([2 ** n for n in range(4, 7)]),
initializer = np.array(['lecun_uniform', 'lecun_normal',
'glorot_uniform', 'glorot_normal',
'he_uniform', 'he_normal'])):
self.optimizer = np.unique(np.asarray(optimizer))
self.hidden_activation = np.unique(np.asarray(hidden_activation))
self.batch_size = np.unique(np.asarray(batch_size))
        self.initializer = np.unique(np.asarray(initializer))
# coding: utf-8
# # Broadcasting on a spectrum - One component model
# In[ ]:
from astropy.io import fits
import numpy as np
import scipy as sp
from scipy.interpolate import interp1d
from scipy.stats import chisquare
from PyAstronomy.pyasl import dopplerShift
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib')
# In[ ]:
def one_comp_model(wav, model1, gammas):
# Make 1 component simulations, broadcasting over gamma values.
# Enable single scalar inputs (turn to 1d np.array)
if not hasattr(gammas, "__len__"):
gammas = np.asarray(gammas)[np.newaxis]
print(len(gammas))
m1 = model1
print(model1.shape)
m1g = np.empty(model1.shape + (len(gammas),)) # am2rvm1g = am2rvm1 with gamma doppler-shift
print(m1g.shape)
for j, gamma in enumerate(gammas):
wav_j = (1 + gamma / 299792.458) * wav
m1g[:, j] = interp1d(wav_j, m1, axis=0, bounds_error=False)(wav)
    return interp1d(wav, m1g, axis=0)  # pass it the wavelength values to return
# In[ ]:
# Load in the data
wav = "/home/jneal/Phd/data/phoenixmodels/WAVE_PHOENIX-ACES-AGSS-COND-2011.fits"
host = "/home/jneal/Phd/data/phoenixmodels/HD30501-lte05200-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
comp = "/home/jneal/Phd/data/phoenixmodels/HD30501b-lte02500-5.00-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
w = fits.getdata(wav) / 10
h = fits.getdata(host)
c = fits.getdata(comp)
# In[ ]:
mask = (2111 < w) & (w < 2117)
w = w[mask]
h = h[mask]
c = c[mask]
# crude normalization
h = h / np.max(h)
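# Usage sketch: broadcast the host spectrum over a grid of RV shifts (km/s) and
# evaluate on the observed wavelength axis; the result has shape
# (len(w), len(gammas)).
#
#   ocm = one_comp_model(w, h, gammas=np.arange(-20.0, 20.5, 0.5))
#   model_grid = ocm(w)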
# Copyright (C) 2017 TU Dresden
# Licensed under the ISC license (see LICENSE.txt)
#
# Authors: <NAME>, <NAME>
import sys
import numpy as np
from hydra.utils import instantiate
from mocasin.representations import MappingRepresentation
import mocasin.util.random_distributions.lp as lp
from mocasin.util import logging
log = logging.getLogger(__name__)
class Volume(object):
def __init__(self):
log.debug("create default volume")
def adapt(vol):
log.debug("adapt volume")
return vol
def shrink(vol):
log.debug("shrink volume")
return vol
class Cube(Volume):
def __init__(
self,
graph,
platform,
representation,
center,
radius=1.0,
max_step=10,
max_pe=16,
):
# define initial cube with radius 1 at the given center
self.center = center.to_list()
self.radius = radius
        self.dim = len(center)
        # keep the step / PE limits used by extend()
        self.max_step = max_step
        self.max_pe = max_pe
# https://stackoverflow.com/questions/4984647/accessing-dict-keys-like-an-attribute
def adapt_center(self, s_set):
fs_set = list(map(lambda s: s.sample, s_set.get_feasible()))
if not fs_set:
return self.center
# take mean of feasible points as new center
m = np.mean(fs_set, axis=0)
self.center = np.around(m).tolist()
return self.center
def correct_center(self, s_set, center, old_center):
# shortest points to center
d_cur = list(map(lambda s: [s.dist(center), s], s_set.get_feasible()))
d_cur = sorted(d_cur, key=lambda x: x[0])
nearest_samples = []
for s in d_cur:
if s[0] == d_cur[0][0]:
nearest_samples.append(s[1])
# take (first) shortest point to old center from that result
d_old = list(
map(lambda s: [s.dist(old_center), s], s_set.get_feasible())
)
d_old = sorted(d_old, key=lambda x: x[0])
for s in d_old:
if s[1] in nearest_samples:
return s[1].sample
return None
def adapt_volume(self, s_set, target_p, s_val):
fs_set = list(map(lambda s: s.sample, s_set.get_feasible()))
# adjust radius
p = len(s_set.get_feasible()) / len(s_set.sample_set)
log.debug("---------- adapt_volume() -----------")
log.debug(
"p-factors: {} {}".format(
s_set.get_feasible(), len(s_set.sample_set)
)
)
if p >= target_p:
# simple adaptation: cube does not support shape adaption
log.debug(
"extend at p: {:f} target_p {:f} r: {:f}".format(
p, target_p, self.radius
)
)
self.extend(s_val)
else:
log.debug(
"shrink at p: {:f} target_p {:f} r: {:f}".format(
p, target_p, self.radius
)
)
self.shrink(s_val)
return p
def shrink(self, step):
        # shrink volume by one on each border
self.radius = self.radius - 1 if (self.radius - 1 > 0) else self.radius
def extend(self, step):
# extend volume by one on each border
self.radius = (
self.radius + step * self.max_step
if (self.radius + step * self.max_step < self.max_pe)
else self.radius
)
class LPVolume(Volume):
def __init__(
self,
graph,
platform,
representation,
center,
radius,
adaptable_center_weights=True,
aggressive_center_movement=False,
adapt_samples=0,
):
        # This is a workaround until Hydra 1.1 (with recursive instantiation!)
if not issubclass(type(type(representation)), MappingRepresentation):
representation = instantiate(representation, graph, platform)
self.representation = representation
self.graph = graph
self.platform = platform
self.adaptable_center_weights = adaptable_center_weights
self.adapt_samples = adapt_samples
self.aggressive_center_movement = aggressive_center_movement
self.center = np.array(self.representation.toRepresentation(center))
log.debug(f"Initializing center with representation:{self.center}")
self.old_center = self.center
self.radius = radius
self.dim = len(self.center)
self.true_dim = len(graph.processes())
if not hasattr(self.representation, "p"):
raise RuntimeError("Representation does not have a norm")
self.norm_p = representation.p
self.weight_center = 1 / (np.exp(1) * self.dim)
        self.rk1_learning_constant = 1 / np.sqrt(self.true_dim)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from .analysis_gauss_numerical_integration import gauss_numerical_integration
from .exoplanet_orbit import exoplanet_orbit
def integral_r_claret(limb_darkening_coefficients, r):
a1, a2, a3, a4 = limb_darkening_coefficients
mu44 = 1.0 - r * r
mu24 = np.sqrt(mu44)
mu14 = np.sqrt(mu24)
return - (2.0 * (1.0 - a1 - a2 - a3 - a4) / 4) * mu44 \
- (2.0 * a1 / 5) * mu44 * mu14 \
- (2.0 * a2 / 6) * mu44 * mu24 \
- (2.0 * a3 / 7) * mu44 * mu24 * mu14 \
- (2.0 * a4 / 8) * mu44 * mu44
def num_claret(r, limb_darkening_coefficients, rprs, z):
a1, a2, a3, a4 = limb_darkening_coefficients
rsq = r * r
mu44 = 1.0 - rsq
mu24 = np.sqrt(mu44)
mu14 = np.sqrt(mu24)
return ((1.0 - a1 - a2 - a3 - a4) + a1 * mu14 + a2 * mu24 + a3 * mu24 * mu14 + a4 * mu44) \
* r * np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
def integral_r_f_claret(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
return gauss_numerical_integration(num_claret, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# integral definitions for zero method
def integral_r_zero(limb_darkening_coefficients, r):
musq = 1 - r * r
return (-1.0 / 6) * musq * 3.0
def num_zero(r, limb_darkening_coefficients, rprs, z):
rsq = r * r
return r * np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
def integral_r_f_zero(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
return gauss_numerical_integration(num_zero, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# integral definitions for linear method
def integral_r_linear(limb_darkening_coefficients, r):
a1 = limb_darkening_coefficients[0]
musq = 1 - r * r
return (-1.0 / 6) * musq * (3.0 + a1 * (-3.0 + 2.0 * np.sqrt(musq)))
def num_linear(r, limb_darkening_coefficients, rprs, z):
a1 = limb_darkening_coefficients[0]
rsq = r * r
return (1.0 - a1 * (1.0 - np.sqrt(1.0 - rsq))) \
* r * np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
def integral_r_f_linear(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
return gauss_numerical_integration(num_linear, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# integral definitions for quadratic method
def integral_r_quad(limb_darkening_coefficients, r):
a1, a2 = limb_darkening_coefficients[:2]
musq = 1 - r * r
mu = np.sqrt(musq)
return (1.0 / 12) * (-4.0 * (a1 + 2.0 * a2) * mu * musq + 6.0 * (-1 + a1 + a2) * musq + 3.0 * a2 * musq * musq)
def num_quad(r, limb_darkening_coefficients, rprs, z):
a1, a2 = limb_darkening_coefficients[:2]
rsq = r * r
cc = 1.0 - np.sqrt(1.0 - rsq)
return (1.0 - a1 * cc - a2 * cc * cc) \
* r * np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
def integral_r_f_quad(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
return gauss_numerical_integration(num_quad, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# integral definitions for square root method
def integral_r_sqrt(limb_darkening_coefficients, r):
a1, a2 = limb_darkening_coefficients[:2]
musq = 1 - r * r
mu = np.sqrt(musq)
return ((-2.0 / 5) * a2 * np.sqrt(mu) - (1.0 / 3) * a1 * mu + (1.0 / 2) * (-1 + a1 + a2)) * musq
def num_sqrt(r, limb_darkening_coefficients, rprs, z):
a1, a2 = limb_darkening_coefficients[:2]
rsq = r * r
mu = np.sqrt(1.0 - rsq)
return (1.0 - a1 * (1 - mu) - a2 * (1.0 - np.sqrt(mu))) \
* r * np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
def integral_r_f_sqrt(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
return gauss_numerical_integration(num_sqrt, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# dictionaries containing the different methods,
# if you define a new method, include the functions in the dictionaries as well (see the sketch below)
integral_r = {
'claret': integral_r_claret,
'linear': integral_r_linear,
'quad': integral_r_quad,
'sqrt': integral_r_sqrt,
'zero': integral_r_zero
}
integral_r_f = {
'claret': integral_r_f_claret,
'linear': integral_r_f_linear,
'quad': integral_r_f_quad,
'sqrt': integral_r_f_sqrt,
'zero': integral_r_f_zero,
}
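# Illustrative sketch (not part of the original module): a new limb-darkening law is
# registered by defining the two functions with the same signatures as above and adding
# them to both dictionaries. The 'flat' law below is hypothetical and simply reuses the
# zero-coefficient expressions.
def integral_r_flat(limb_darkening_coefficients, r):
    return integral_r_zero(limb_darkening_coefficients, r)
def integral_r_f_flat(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
    return integral_r_f_zero(limb_darkening_coefficients, rprs, z, r1, r2, precision)
integral_r['flat'] = integral_r_flat
integral_r_f['flat'] = integral_r_f_flat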
def integral_centred(method, limb_darkening_coefficients, rprs, ww1, ww2):
return (integral_r[method](limb_darkening_coefficients, rprs)
- integral_r[method](limb_darkening_coefficients, 0.0)) * np.abs(ww2 - ww1)
def integral_plus_core(method, limb_darkening_coefficients, rprs, z, ww1, ww2, precision=3):
if len(z) == 0:
return z
rr1 = z * np.cos(ww1) + np.sqrt(np.maximum(rprs ** 2 - (z * np.sin(ww1)) ** 2, 0))
    rr1 = np.clip(rr1, 0, 1)
import matplotlib.pyplot as plt
import matplotlib as mpl
import time
from datetime import datetime
import pytz
import numpy as np
import threading
import requests as req
import json
# ################################### #
# @title Get Fields
url_field = "url_field" # @param {type:"string"}
# api_name_field = "report/reports/steps" # @param {type:"string"}
duration = 3 # @param {type:"integer"} #Definition: total test time (in seconds)
clients = 20 # @param {type:"integer"} #Definition: number of clients per second
rounds = 1 # @param {type:"integer"} #Definition: number of rounds
request_timeout = 10 # @param {type:"integer"} #Definition: timeout after request_timeout seconds
sleep_after_each_round = 0 # @param {type:"integer"} #Definition: time to sleep after finishing each round (in seconds)
result_log_path = "results" # @param {type:"string"}
gdrive = "gdrive_path"
api_package = {
"keys": ["api_path", "payload_path", "method", "name"],
"values": [
["report/reports/steps", gdrive+"payload_name.txt", "get", "steps-get"],
]
}
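# Illustrative note (an assumption, not part of the original script): each entry in
# "values" follows the order given in "keys" (api_path, payload_path, method, name),
# so a second endpoint would be appended as, e.g.:
#     ["report/reports/summary", gdrive + "summary_payload.txt", "get", "summary-get"],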
# ################################### #
""" Global Variables """
my_mutex = threading.Lock()
start_time = time.time()
end_time = time.time()
dt = datetime.now(tz=pytz.timezone("Asia/Tehran"))
save_name = ((str(dt).split(" ")[0] + "-" + str(dt).split(" ")[1]).split("+")[0]).split(".")[0].replace(":", "-")
REPORTS = {
"perSecond": {
"response_time": [],
"ok200_res_time": [],
"ok1000_res_time": [],
"err500_400_res_time": [],
},
"perTest": {
"avg_res_time": [],
"min_res_time": [],
"max_res_time": [],
"avg_ok200_res_time": [],
"avg_ok1000_res_time": [],
"avg_err500_400_res_time": [],
"total_requests": 0,
"ok": 0,
"err": 0,
"timeouts": 0,
"mean_avg": 0,
"global_max": [0, 0], # x,y
"global_min": [0, np.inf]
}
}
class Thrd(threading.Thread):
def __init__(self, url, api_name, params, method="get"):
threading.Thread.__init__(self)
self.url = url
self.api = api_name
self.params = params
self.method = method
def run(self):
global REPORTS, my_mutex
my_mutex.acquire()
REPORTS["perTest"]["total_requests"] += 1
my_mutex.release()
try:
if self.method == "get":
r = req.get(self.url + self.api, self.params, timeout=request_timeout, verify=False)
            else:
                # assumption: non-GET entries in api_package are sent as POST requests
                r = req.post(self.url + self.api, self.params, timeout=request_timeout, verify=False)
if r.status_code == 200:
my_mutex.acquire()
REPORTS["perSecond"]["ok200_res_time"].append(r.elapsed.total_seconds())
REPORTS["perTest"]["ok"] += 1
my_mutex.release()
data = r.json()
if data["error_code"] == "1000":
my_mutex.acquire()
REPORTS["perSecond"]["ok1000_res_time"].append(r.elapsed.total_seconds())
my_mutex.release()
else:
my_mutex.acquire()
REPORTS["perSecond"]["err500_400_res_time"].append(r.elapsed.total_seconds())
REPORTS["perTest"]["err"] += 1
my_mutex.release()
my_mutex.acquire()
REPORTS["perSecond"]["response_time"].append(r.elapsed.total_seconds())
my_mutex.release()
        except Exception as Argument:
            my_mutex.acquire()
            s = "Exception on Thrd->run: {}".format(Argument)
            print(s, file=open(f'{result_log_path}-{save_name}', 'a'))
            print(s)
            REPORTS["perTest"]["timeouts"] += 1
            my_mutex.release()
def start_test():
global end_time
thrds = []
url = url_field
url = url + "/" if url[-1] != "/" else url
# with open('user_data_path' 'r') as file:
payload = []
for api in api_package["values"]:
with open(api[1], "r") as file:
payload.append(json.load(file))
# Each payload has keys , values
n_apis = len(api_package["values"])
used_api_names = []
j = 0
for i in range(clients):
params = {}
api_index = np.random.randint(0, n_apis)
payload_len = len(payload[api_index])
for index, key in enumerate(payload[api_index]["keys"]):
params[key] = payload[api_index]["values"][j][index]
thrds.append(Thrd(url, api_package["values"][api_index][0], params, api_package["values"][api_index][2]))
j = (j + 1) % payload_len
used_api_names.append(api_package["values"][api_index][3])
for thrd in thrds:
thrd.start()
end_time = time.time()
for thrd in thrds:
thrd.join()
return used_api_names
def run_trigger():
global REPORTS, start_time, end_time
for report in REPORTS["perSecond"]:
REPORTS["perSecond"][report] = []
for report in REPORTS["perTest"]:
REPORTS["perTest"][report] = []
REPORTS["perTest"]["total_requests"] = 0
REPORTS["perTest"]["timeouts"] = 0
REPORTS["perTest"]["ok"] = 0
REPORTS["perTest"]["err"] = 0
REPORTS["perTest"]["global_min"] = [0, np.inf]
REPORTS["perTest"]["global_max"] = [0, 0]
REPORTS["perTest"]["mean_avg"] = 0
for i in range(rounds):
s = "Round {}".format(i + 1)
print(s, file=open(f'{result_log_path}-{save_name}', 'a'))
print(s)
for j in range(duration):
REPORTS["perSecond"]["response_time"] = []
REPORTS["perSecond"]["ok200_res_time"] = []
REPORTS["perSecond"]["ok1000_res_time"] = []
REPORTS["perSecond"]["err500_400_res_time"] = []
start_time = time.time()
# /// START TEST /// #
u_apis = start_test()
elapsed_time = end_time - start_time
time.sleep(1-elapsed_time if elapsed_time+0.01 < 1 else 0)
REPORTS["perTest"]["avg_res_time"].append(np.average(np.array(REPORTS["perSecond"]["response_time"])))
            REPORTS["perTest"]["min_res_time"].append(np.min(np.array(REPORTS["perSecond"]["response_time"])))
import json
import os
import time
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.legend import Legend
from sampler.NestedSampling import GlobalNestedSampler
from slam.Variables import R2Variable, VariableType
from factors.Factors import UncertainR2RangeGaussianLikelihoodFactor, UncertainUnaryR2RangeGaussianPriorFactor
import random
from scipy.spatial import distance_matrix as dist_mat_fun
from slam.FactorGraphSimulator import factor_graph_to_string
import multiprocessing as mp
from utils.Functions import NumpyEncoder
from utils.Visualization import plot_2d_samples
if __name__ == '__main__':
np.random.seed(1)
random.seed(1)
data = np.loadtxt("raw_data", delimiter=',', dtype=str)[1:,:]
data = data.astype(float)
# the second last col is ID
sorted_idx = np.argsort(data[:, -2])
sorted_data = data[sorted_idx]
print(sorted_data[0])
known_marker = "^"
unknown_marker = "o"
sorted_color = ['r', 'b', 'g', 'tab:purple', 'tab:orange', 'yellow', 'tab:brown', 'tab:pink', 'tab:olive', 'tab:cyan','black']
known_idx = np.where(sorted_data[:, -1] == 1)[0]
print("Known indices are ", known_idx)
unknown_idx = np.where(sorted_data[:, -1] == 0)[0]
print("Unknown indices are ", unknown_idx)
    # plotting the ground truth
known_lgd = []
unknown_lgd = []
fig, axs = plt.subplots()
color_cnt = 0
known_labels = []
unknown_labels = []
known_plot = []
unknown_plot = []
# including observations
adj_mat = np.zeros((len(sorted_data), len(sorted_data)))
adj_mat[0, [5, 4, 3]] = 1
adj_mat[1, [2, 3]] = 1
adj_mat[2, [1,5, 4]] = 1
adj_mat[3, [1, 0]] = 1
adj_mat[4, [0, 2]] = 1
adj_mat[5, [0, 2]] = 1
data_dict = {}
print(adj_mat-adj_mat.T)
assert np.all(np.abs(adj_mat-adj_mat.T) < 1e-5) == True
for i, line in enumerate(sorted_data):
x, y, id, ifknow = line
id = int(id)
data_dict[i] = {'x': x, 'y': y, 'known': ifknow, "connect": np.where(adj_mat[i] == 1)[0]}
if color_cnt >= len(sorted_color):
c = "black"
else:
c = sorted_color[color_cnt]
color_cnt += 1
if ifknow:
known_labels.append(str(id))
handle = axs.scatter([x], [y], marker = known_marker, c = c)
known_lgd.append(handle)
else:
unknown_labels.append(str(id))
handle = axs.scatter([x], [y], marker = unknown_marker, c = c)
unknown_lgd.append(handle)
for target in data_dict[i]['connect']:
if target < i:
t_x = data_dict[target]['x']
t_y = data_dict[target]['y']
axs.plot([t_x, x],[t_y, y], '--', color = 'grey')
# lgd1 = plt.legend(known_lgd, loc = "upper right")
axs.legend(known_lgd, known_labels, bbox_to_anchor=(.98, 1.0), loc='upper left', frameon = False, title="Known")
leg = Legend(axs, unknown_lgd, unknown_labels, bbox_to_anchor=(.98, .5), loc='upper left', frameon=False, title="Unknown")
axs.add_artist(leg)
axs.set_xlabel("x")
axs.set_ylabel("y")
xlim = [-1.2, 1.8]
ylim = [-1.2, 1.8]
axs.set_xlim(xlim)
axs.set_ylim(ylim)
axs.set_aspect('equal', 'box')
plt.subplots_adjust(right = .8)
plt.savefig("gt.png", dpi = 300, bbox_to_anchor = "tight")
plt.show()
# generating factor graphs
unobsv_sigma = .3
sigma = .02
# dist mat
meas = np.loadtxt("measurements", dtype = str)[1:, :].astype(float)
    noisy_dist_mat = np.zeros_like(adj_mat)
from omegaconf import DictConfig, OmegaConf
import hydra
from hydra.core.hydra_config import HydraConfig
import numpy as np
import os
import os.path as osp
import pandas as pd
import geopandas as gpd
import pickle
from yaml import Loader, load
import itertools as it
import networkx as nx
import scipy.stats as stats
@hydra.main(config_path="conf", config_name="config")
def evaluate(cfg: DictConfig):
"""
Evaluation of fluxes and source/sink terms predicted by FluxRGNN.
Predicted quantities are compared with ground truth quantities from simulations.
If H_min==H_max, quantities are evaluated per hour, otherwise quantities are
aggregated over H_max-H_min time steps.
"""
base_dir = cfg.device.root
experiment = cfg.get('experiment', 'final')
result_dir = osp.join(base_dir, cfg.output_dir, cfg.datasource.name, cfg.model.name, f'test_{cfg.datasource.test_year}', experiment)
data_dir = osp.join(base_dir, 'data', 'preprocessed',
f'{cfg.t_unit}_{cfg.model.edge_type}_ndummy={cfg.datasource.n_dummy_radars}',
cfg.datasource.name, cfg.season, str(cfg.datasource.test_year))
H_min = cfg.get('H_min', 1)
H_max = cfg.get('H_max', 24)
voronoi = gpd.read_file(osp.join(data_dir, 'voronoi.shp'))
print(result_dir)
results, _ = load_cv_results(result_dir, trials=cfg.task.repeats)
output_dir = osp.join(result_dir, 'performance_evaluation', f'{H_min}-{H_max}')
os.makedirs(output_dir, exist_ok=True)
voronoi = evaluate_source_sink(cfg, results, H_min, H_max, voronoi, data_dir, output_dir)
evaluate_fluxes(cfg, results, H_min, H_max, voronoi, data_dir, result_dir, output_dir)
def evaluate_fluxes(cfg:DictConfig, results, H_min, H_max, voronoi, data_dir, result_dir, output_dir):
context = cfg.model.context
#result_dir = osp.join(cfg.device.root, cfg.output_dir, cfg.datasource.name, cfg.model.name,
# f'test_{cfg.datasource.test_year}', cfg.experiment)
boundary_idx = voronoi.query('observed == 0').index.values
model_fluxes = load_model_fluxes(result_dir, trials=cfg.task.repeats)
G = nx.read_gpickle(osp.join(data_dir, 'delaunay.gpickle'))
if cfg.datasource.name == 'abm':
# load abm fluxes
with open(osp.join(data_dir, 'time.pkl'), 'rb') as f:
abm_time = pickle.load(f)
time_dict = {t: idx for idx, t in enumerate(abm_time)}
gt_fluxes = np.load(osp.join(data_dir, 'outfluxes.npy'))
        # rearrange abm fluxes to match model fluxes
gt_fluxes_H = []
gt_months = []
for s in sorted(results.groupby('seqID').groups.keys()):
df = results.query(f'seqID == {s}')
time = sorted(df.datetime.unique())
gt_months.append(pd.DatetimeIndex(time).month[context + 1])
agg_gt_fluxes = np.stack([gt_fluxes[time_dict[pd.Timestamp(time[context + h])]]
for h in range(H_min, H_max + 1)], axis=0).sum(0)
gt_fluxes_H.append(agg_gt_fluxes)
gt_fluxes = np.stack(gt_fluxes_H, axis=-1)
gt_months = np.stack(gt_months, axis=-1)
# exclude "self-fluxes"
for i in range(gt_fluxes.shape[0]):
gt_fluxes[i, i] = np.nan
# exclude boundary to boundary fluxes
for i, j in it.product(boundary_idx, repeat=2):
gt_fluxes[i, j] = np.nan
# net fluxes
gt_net_fluxes = gt_fluxes - np.moveaxis(gt_fluxes, 0, 1)
overall_corr = {}
corr_influx = []
corr_outflux = []
corr_d2b = []
corr_angles = []
bin_fluxes = []
all_fluxes = dict(model_flux=[], gt_flux=[], radar1=[], radar2=[], trial=[])
# loop over all trials
print(model_fluxes.keys())
for t, model_fluxes_t in model_fluxes.items():
print(f'evaluate fluxes for trial {t}')
seqIDs = sorted(model_fluxes_t.keys())
model_fluxes_t = np.stack([model_fluxes_t[s].detach().numpy()[..., H_min:H_max + 1].sum(-1) for s in seqIDs],
axis=-1)
model_net_fluxes_t = model_fluxes_t - np.moveaxis(model_fluxes_t, 0, 1)
if cfg.datasource.name == 'abm':
mask = np.isfinite(gt_net_fluxes)
print(mask.shape)
overall_corr[t] = np.corrcoef(gt_net_fluxes[mask].flatten(),
model_net_fluxes_t[mask].flatten())[0, 1]
bin_results = bin_metrics_fluxes(model_net_fluxes_t, gt_net_fluxes)
bin_results = pd.DataFrame(bin_results)
bin_results['trial'] = t
bin_fluxes.append(bin_results)
print(t)
for i, ri in voronoi.iterrows():
for j, rj in voronoi.iterrows():
if not i == j:
#print(model_net_fluxes_t[mask].shape)
#print(model_net_fluxes_t.shape)
all_fluxes['model_flux'].extend(model_net_fluxes_t[i,j])
all_fluxes['gt_flux'].extend(gt_net_fluxes[i,j])
length = model_net_fluxes_t[i,j].size
all_fluxes['radar1'].extend([ri['radar']] * length)
all_fluxes['radar2'].extend([rj['radar']] * length)
all_fluxes['trial'].extend([t] * length)
corr_influx_per_month = dict(month=[], corr=[], trial=[])
corr_outflux_per_month = dict(month=[], corr=[], trial=[])
for m in np.unique(gt_months):
idx = np.where(gt_months == m)
gt_influx_m = np.nansum(gt_fluxes[..., idx], axis=1)
gt_outflux_m = np.nansum(gt_fluxes[..., idx], axis=0)
model_influx_m = np.nansum(model_fluxes_t[..., idx], axis=1)
model_outflux_m = np.nansum(model_fluxes_t[..., idx], axis=0)
mask = np.isfinite(gt_influx_m)
corr = np.corrcoef(gt_influx_m[mask].flatten(),
model_influx_m[mask].flatten())[0, 1]
corr_influx_per_month['corr'].append(corr)
corr_influx_per_month['month'].append(m)
corr_influx_per_month['trial'].append(t)
mask = np.isfinite(gt_outflux_m)
corr = np.corrcoef(gt_outflux_m[mask].flatten(),
model_outflux_m[mask].flatten())[0, 1]
corr_outflux_per_month['corr'].append(corr)
corr_outflux_per_month['month'].append(m)
corr_outflux_per_month['trial'].append(t)
corr_influx.append(pd.DataFrame(corr_influx_per_month))
corr_outflux.append(pd.DataFrame(corr_outflux_per_month))
d2b_index = fluxes_per_dist2boundary(G, voronoi)
corr_per_d2b = dict(d2b=[], corr=[], trial=[])
for d2b, index in d2b_index.items():
model_net_fluxes_d2b = model_net_fluxes_t[index['idx'], index['jdx']]
gt_net_fluxes_d2b = gt_net_fluxes[index['idx'], index['jdx']]
mask = np.isfinite(gt_net_fluxes_d2b)
corr = stats.pearsonr(model_net_fluxes_d2b[mask].flatten(),
gt_net_fluxes_d2b[mask].flatten())[0]
corr_per_d2b['d2b'].append(d2b)
corr_per_d2b['corr'].append(corr)
corr_per_d2b['trial'].append(t)
corr_d2b.append(pd.DataFrame(corr_per_d2b))
angle_index = fluxes_per_angle(G)
corr_per_angle = dict(angle=[], rad=[], corr=[], trial=[])
for angle, index in angle_index.items():
model_net_fluxes_a = model_net_fluxes_t[index['idx'], index['jdx']]
gt_net_fluxes_a = gt_net_fluxes[index['idx'], index['jdx']]
mask = np.isfinite(gt_net_fluxes_a)
corr = stats.pearsonr(model_net_fluxes_a[mask].flatten(),
gt_net_fluxes_a[mask].flatten())[0]
corr_per_angle['angle'].append(angle)
corr_per_angle['rad'].append(angle / 360 * 2 * np.pi)
corr_per_angle['corr'].append(corr)
corr_per_angle['trial'].append(t)
corr_angles.append(pd.DataFrame(corr_per_angle))
if H_min == H_max:
agg_func = np.nansum
else:
agg_func = np.nanmean
G_model = fluxes_on_graph(voronoi, G, model_net_fluxes_t, agg_func=agg_func)
nx.write_gpickle(G_model, osp.join(output_dir, f'model_fluxes_{t}.gpickle'), protocol=4)
boundary_radars = voronoi.query('boundary == True').radar.values
G_boundary = fluxes_on_graph(voronoi, G, model_net_fluxes_t, agg_func=agg_func, radars=boundary_radars)
voronoi[f'net_flux_{t}'] = voronoi.apply(lambda row: total_net_flux(G_boundary, row.name), axis=1)
if cfg.datasource.name == 'abm':
G_flux_corr = corr_on_graph(voronoi, G, gt_net_fluxes, model_net_fluxes_t)
nx.write_gpickle(G_flux_corr, osp.join(output_dir, f'flux_corr_{t}.gpickle'), protocol=4)
if t == 1:
G_gt = fluxes_on_graph(voronoi, G, gt_net_fluxes, agg_func=agg_func)
nx.write_gpickle(G_gt, osp.join(output_dir, 'gt_fluxes.gpickle'), protocol=4)
voronoi.to_csv(osp.join(output_dir, 'voronoi_summary.csv'))
if cfg.datasource.name == 'abm':
corr_d2b = pd.concat(corr_d2b)
corr_angles = pd.concat(corr_angles)
bin_fluxes = pd.concat(bin_fluxes)
corr_influx = pd.concat(corr_influx)
corr_outflux = pd.concat(corr_outflux)
corr_d2b.to_csv(osp.join(output_dir, 'agg_corr_d2b_per_trial.csv'))
corr_angles.to_csv(osp.join(output_dir, 'agg_corr_angles_per_trial.csv'))
bin_fluxes.to_csv(osp.join(output_dir, 'agg_bins_per_trial.csv'))
corr_influx.to_csv(osp.join(output_dir, 'agg_corr_influx_per_month.csv'))
corr_outflux.to_csv(osp.join(output_dir, 'agg_corr_outflux_per_month.csv'))
all_fluxes = pd.DataFrame(all_fluxes)
all_fluxes.to_csv(osp.join(output_dir, 'all_fluxes_per_trial.csv'))
with open(osp.join(output_dir, 'agg_overall_corr.pickle'), 'wb') as f:
pickle.dump(overall_corr, f, pickle.HIGHEST_PROTOCOL)
def evaluate_source_sink(cfg:DictConfig, results, H_min, H_max, voronoi, data_dir, output_dir):
inner_radars = voronoi.query('observed == 1').radar.values
radar_dict = voronoi.radar.to_dict()
radar_dict = {v: k for k, v in radar_dict.items()}
area_scale = results.area.max()
df = results.query(f'horizon <= {H_max} & horizon >= {H_min}')
df = df[df.radar.isin(inner_radars)]
df['month'] = pd.DatetimeIndex(df.datetime).month
grouped = df.groupby(['radar', 'trial'])
def get_net_source_sink(radar, trial):
if radar in inner_radars:
df = grouped.get_group((radar, trial)).aggregate(np.nansum)
return df['source_km2'] - df['sink_km2']
else:
return np.nan
for t in df.trial.unique():
voronoi[f'net_source_sink_{t}'] = voronoi.apply(lambda row:
get_net_source_sink(row.radar, t), axis=1)
if cfg.datasource.name == 'abm':
dep = np.load(osp.join(data_dir, 'departing_birds.npy'))
land = np.load(osp.join(data_dir, 'landing_birds.npy'))
with open(osp.join(data_dir, 'time.pkl'), 'rb') as f:
abm_time = pickle.load(f)
time_dict = {t: idx for idx, t in enumerate(abm_time)}
def get_abm_data(data, datetime, radar, bird_scale=1):
tidx = time_dict[pd.Timestamp(datetime)]
ridx = radar_dict[radar]
return data[tidx, ridx] / bird_scale
corr_source = dict(month=[], trial=[], corr=[])
corr_sink = dict(month=[], trial=[], corr=[])
source_agg = dict(trial=[], gt=[], model=[])
sink_agg = dict(trial=[], gt=[], model=[])
for m in df.month.unique():
print(f'evaluate month {m}')
for t in df.trial.unique():
data = df.query(f'month == {m} & trial == {t}')
print(f'compute abm source/sink for trial {t}')
gt_source_km2 = []
gt_sink_km2 = []
for i, row in data.iterrows():
gt_source_km2.append(get_abm_data(dep, row['datetime'], row['radar']) / (row['area'] / area_scale))
gt_sink_km2.append(get_abm_data(land, row['datetime'], row['radar']) / (row['area'] / area_scale))
data['gt_source_km2'] = gt_source_km2
data['gt_sink_km2'] = gt_sink_km2
grouped = data.groupby(['seqID', 'radar'])
grouped = grouped[['gt_source_km2', 'source_km2', 'gt_sink_km2', 'sink_km2']].aggregate(
np.nansum).reset_index()
source_agg['gt'].extend(grouped.gt_source_km2.values)
source_agg['model'].extend(grouped.source_km2.values)
source_agg['trial'].extend([t] * len(grouped.gt_source_km2))
sink_agg['gt'].extend(grouped.gt_sink_km2.values)
sink_agg['model'].extend(grouped.sink_km2.values)
sink_agg['trial'].extend([t] * len(grouped.gt_sink_km2))
corr = np.corrcoef(grouped.gt_source_km2.to_numpy(),
grouped.source_km2.to_numpy())[0, 1]
corr_source['month'].append(m)
corr_source['trial'].append(t)
corr_source['corr'].append(corr)
corr = np.corrcoef(grouped.gt_sink_km2.to_numpy(),
grouped.sink_km2.to_numpy())[0, 1]
corr_sink['month'].append(m)
corr_sink['trial'].append(t)
corr_sink['corr'].append(corr)
corr_source = pd.DataFrame(corr_source)
corr_sink = pd.DataFrame(corr_sink)
corr_source.to_csv(osp.join(output_dir, 'agg_source_corr_per_month_and_trial.csv'))
corr_sink.to_csv(osp.join(output_dir, 'agg_sink_corr_per_month_and_trial.csv'))
source_agg = pd.DataFrame(source_agg)
sink_agg = pd.DataFrame(sink_agg)
corr_source_all = dict(trial=[], corr=[])
corr_sink_all = dict(trial=[], corr=[])
for t in df.trial.unique():
source_agg_t = source_agg.query(f'trial == {t}')
corr_source_all['corr'].append(np.corrcoef(source_agg_t['gt'].to_numpy(),
source_agg_t.model.to_numpy())[0, 1])
corr_source_all['trial'].append(t)
sink_agg_t = sink_agg.query(f'trial == {t}')
corr_sink_all['corr'].append(np.corrcoef(sink_agg_t['gt'].to_numpy(),
sink_agg_t.model.to_numpy())[0, 1])
corr_sink_all['trial'].append(t)
corr_source_all = pd.DataFrame(corr_source_all)
corr_source_all.to_csv(osp.join(output_dir, 'agg_source_corr_per_trial.csv'))
corr_sink_all = pd.DataFrame(corr_sink_all)
corr_sink_all.to_csv(osp.join(output_dir, 'agg_sink_corr_per_trial.csv'))
return voronoi
def load_cv_results(result_dir, ext='', trials=1):
result_list = []
for t in range(1, trials + 1):
file = osp.join(result_dir, f'trial_{t}', f'results{ext}.csv')
print(file)
if osp.isfile(file):
df = pd.read_csv(file)
df['trial'] = t
result_list.append(df)
cfg_file = osp.join(result_dir, f'trial_{t}', 'config.yaml')
with open(cfg_file) as f:
cfg = load(f, Loader=Loader)
results = pd.concat(result_list)
return results, cfg
def load_model_fluxes(result_dir, ext='', trials=1):
fluxes = {}
for t in range(1, trials + 1):
file = osp.join(result_dir, f'trial_{t}', f'model_fluxes{ext}.pickle')
if osp.isfile(file):
with open(file, 'rb') as f:
fluxes[t] = pickle.load(f)
return fluxes
def flux_corr_per_dist2boundary(G, voronoi, model_fluxes, gt_fluxes):
# shortest paths to any boundary cell
sp = nx.shortest_path(G)
d_to_b = np.zeros(len(G))
for ni, datai in G.nodes(data=True):
min_d = np.inf
for nj, dataj in G.nodes(data=True):
if dataj['boundary']:
d = len(sp[ni][nj])
if d < min_d:
min_d = d
        d_to_b[ni] = min_d
voronoi['dist2boundary'] = d_to_b
df = dict(radar1=[], radar2=[], corr=[], dist2boundary=[])
for i, rowi in voronoi.iterrows():
for j, rowj in voronoi.iterrows():
if not i == j:
df['radar1'].append(rowi['radar'])
df['radar2'].append(rowj['radar'])
df['dist2boundary'].append(int(min(rowi['dist2boundary'], rowj['dist2boundary'])))
if not np.all(model_fluxes[i, j] == 0) and not np.all(gt_fluxes[i, j] == 0) and np.all(
np.isfinite(gt_fluxes[i, j])):
df['corr'].append(stats.pearsonr(gt_fluxes[i, j].flatten(), model_fluxes[i, j].flatten())[0])
else:
df['corr'].append(np.nan)
df = pd.DataFrame(df)
return df
def fluxes_per_dist2boundary(G, voronoi):
# shortest paths to any boundary cell
sp = nx.shortest_path(G)
d2b = np.zeros(len(G))
for ni, datai in G.nodes(data=True):
min_d = np.inf
for nj, dataj in G.nodes(data=True):
if dataj['boundary']:
d = len(sp[ni][nj])
if d < min_d:
min_d = d
        d2b[ni] = min_d
voronoi['dist2boundary'] = d2b
d2b_index = {}
for i, j in G.edges():
d2b = int(voronoi.iloc[i]['dist2boundary'])
if not d2b in d2b_index.keys():
d2b_index[d2b] = dict(idx=[], jdx=[])
d2b_index[d2b]['idx'].append(i)
d2b_index[d2b]['jdx'].append(j)
return d2b_index
def fluxes_per_angle(G, bins=12):
angle_index = {}
bins = np.linspace(0, 360, bins + 1)
binc = (bins[1:] + bins[:-1]) / 2
for i, j, data in G.edges(data=True):
angle = (data['angle'] + 360) % 360
        angle_bin = np.where(bins < angle)
from collections import namedtuple
import netCDF4
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from cloudnetpy.categorize import atmos
from cloudnetpy.products.lwc import CloudAdjustor, Lwc, LwcError, LwcSource
DIMENSIONS = ("time", "height", "model_time", "model_height")
TEST_ARRAY = np.arange(3)
CategorizeBits = namedtuple("CategorizeBits", ["category_bits", "quality_bits"])
@pytest.fixture(scope="session")
def lwc_source_file(tmpdir_factory, file_metadata):
file_name = tmpdir_factory.mktemp("data").join("file.nc")
with netCDF4.Dataset(file_name, "w", format="NETCDF4_CLASSIC") as root_grp:
_create_dimensions(root_grp)
_create_dimension_variables(root_grp)
var = root_grp.createVariable("altitude", "f8")
var[:] = 1
var.units = "km"
var = root_grp.createVariable("lwp", "f8", "time")
var[:] = [1, 1, 0.5]
var = root_grp.createVariable("lwp_error", "f8", "time")
var[:] = [0.2, 0.2, 0.1]
var = root_grp.createVariable("rain_rate", "i4", "time")
var[:] = [0, 1, 1]
var = root_grp.createVariable("category_bits", "i4", "time")
var[:] = [0, 1, 2]
var = root_grp.createVariable("quality_bits", "i4", "time")
var[:] = [8, 16, 32]
var = root_grp.createVariable("temperature", "f8", ("time", "height"))
var[:] = np.array([[282, 280, 278], [286, 284, 282], [284, 282, 280]])
var = root_grp.createVariable("pressure", "f8", ("time", "height"))
var[:] = np.array([[1010, 1000, 990], [1020, 1010, 1000], [1030, 1020, 1010]])
return file_name
def _create_dimensions(root_grp):
n_dim = len(TEST_ARRAY)
for dim_name in DIMENSIONS:
root_grp.createDimension(dim_name, n_dim)
def _create_dimension_variables(root_grp):
for dim_name in DIMENSIONS:
x = root_grp.createVariable(dim_name, "f8", (dim_name,))
x[:] = TEST_ARRAY
if dim_name == "height":
x.units = "m"
def test_get_atmosphere_t(lwc_source_file):
obj = LwcSource(lwc_source_file)
expected = np.array([[282, 280, 278], [286, 284, 282], [284, 282, 280]])
assert_array_equal(obj.atmosphere[0], expected)
def test_get_atmosphere_p(lwc_source_file):
obj = LwcSource(lwc_source_file)
expected = np.array([[1010, 1000, 990], [1020, 1010, 1000], [1030, 1020, 1010]])
assert_array_equal(obj.atmosphere[-1], expected)
class LwcSourceObj(LwcSource):
def __init__(self):
self.dheight = 10
self.categorize_bits = CategorizeBits(
category_bits={"droplet": np.asarray([[1, 0, 1], [0, 1, 1]], dtype=bool)},
quality_bits={
"radar": np.asarray([[1, 0, 1], [0, 1, 1]], dtype=bool),
"lidar": np.asarray([[1, 0, 1], [0, 1, 1]], dtype=bool),
},
)
self.atmosphere = (
np.array([[282, 281, 280], [280, 279, 278]]),
np.array([[101000, 100500, 100000], [100000, 99500, 99000]]),
)
self.lwp = np.array([2, 0])
self.lwp_error = np.array([0.1, 0.2])
self.is_rain = np.array([0, 1])
LWC_OBJ = Lwc(LwcSourceObj())
STATUS_OBJ = CloudAdjustor(LwcSourceObj(), LWC_OBJ)
ERROR_OBJ = LwcError(LwcSourceObj(), LWC_OBJ)
@pytest.mark.parametrize("value", [0, 1])
def test_get_liquid(value):
assert value in LWC_OBJ.is_liquid
def test_init_lwc_adiabatic():
lwc_source = LwcSourceObj()
expected = atmos.fill_clouds_with_lwc_dz(lwc_source.atmosphere, LWC_OBJ.is_liquid)
expected[0, 0] *= 10
expected[0, 2] *= 10
expected[1, 1] *= 10
expected[1, 2] *= 20
assert_array_almost_equal(LWC_OBJ._init_lwc_adiabatic(), expected)
def test_screen_rain_lwc():
expected = np.ma.array([[5, 1, 2], [3, 6, 0]], mask=[[0, 0, 0], [1, 1, 1]])
assert_array_equal(expected.mask, LWC_OBJ.lwc.mask)
@pytest.mark.parametrize("value", [0, 1])
def test_init_status(value):
assert value in STATUS_OBJ._init_status()
@pytest.mark.parametrize("key", ["radar", "lidar"])
def test_get_echo(key):
assert key in STATUS_OBJ.echo.keys()
@pytest.mark.parametrize("value", [0, 1, 2])
def test_update_status(value):
time = np.array([0])
STATUS_OBJ._update_status(time)
assert value in STATUS_OBJ.status
@pytest.mark.parametrize("value", [0, 1, 2, 3])
def test_adjust_lwc(value):
time = 0
base = 0
STATUS_OBJ.status = np.array([[1, 0, 2], [0, 0, 2]])
STATUS_OBJ._adjust_lwc(time, base)
assert value in STATUS_OBJ.status
def test_has_converged():
ind = 1
assert STATUS_OBJ._has_converged(ind) is True
def test_out_of_bound():
ind = 2
assert STATUS_OBJ._out_of_bound(ind) is True
def test_find_adjustable_clouds():
assert 1 not in STATUS_OBJ._find_adjustable_clouds()
def test_find_topmost_clouds():
expected = np.asarray([[0, 0, 1], [0, 1, 1]], dtype=bool)
assert_array_equal(STATUS_OBJ._find_topmost_clouds(), expected)
def test_find_echo_combinations_in_liquid():
STATUS_OBJ.echo["lidar"] = np.array([[0, 1, 0], [1, 1, 0]])
STATUS_OBJ.echo["radar"] = np.array([[0, 0, 0], [0, 1, 1]])
STATUS_OBJ.is_liquid = np.array([[1, 1, 1], [0, 1, 1]])
expected = np.array([[0, 1, 0], [0, 3, 2]])
assert_array_equal(STATUS_OBJ._find_echo_combinations_in_liquid(), expected)
def test_find_lidar_only_clouds():
inds = np.array([[1, 0, 0], [0, 1, 3]])
expected = np.array([True, False])
assert_array_equal(STATUS_OBJ._find_lidar_only_clouds(inds), expected)
def test_remove_good_profiles():
top_c = np.asarray([[1, 1, 0], [1, 0, 1]], dtype=bool)
expected = np.asarray([[1, 1, 0], [0, 0, 0]], dtype=bool)
assert_array_equal(STATUS_OBJ._remove_good_profiles(top_c), expected)
def test_find_lwp_difference():
STATUS_OBJ.lwc_adiabatic = np.array([[1, 8, 2], [2, 3, 7]])
STATUS_OBJ.lwc_source.lwp = np.array([50, 30])
expected = np.array([60, 90])
assert_array_equal(STATUS_OBJ._find_lwp_difference(), expected)
@pytest.mark.parametrize("value", [0, 1, 2, 3, 4])
def test_screen_rain_status(value):
STATUS_OBJ.lwc_source.is_rain = np.array([0, 1])
STATUS_OBJ.status = np.array([[0, 2, 2, 3, 1], [1, 3, 0, 2, 2]])
STATUS_OBJ._mask_rain()
assert value in STATUS_OBJ.status
def test_limit_error():
error = np.array([[0, 0, 1], [0.2, 0.4, 0.3]])
max_v = 0.5
expected = np.array([[0, 0, 0.5], [0.2, 0.4, 0.3]])
assert_array_equal(ERROR_OBJ._limit_error(error, max_v), expected)
def test_calc_lwc_gradient():
from cloudnetpy.utils import l2norm
ERROR_OBJ.lwc = np.ma.array([[0.1, 0.2, 0.3], [0.1, 0.3, 0.6]])
expected = l2norm(*np.gradient(ERROR_OBJ.lwc))
assert_array_almost_equal(ERROR_OBJ._calc_lwc_gradient(), expected)
def test_calc_lwc_relative_error():
from cloudnetpy.utils import l2norm
ERROR_OBJ.lwc = np.ma.array([[0.1, 0.2, 0.3], [0.1, 0.3, 0.6]])
    x = l2norm(*np.gradient(ERROR_OBJ.lwc))
"""
File containing functions linked to the constraint matrix
"""
import numpy as np
from numba import jit
from tqdm import tqdm
from scipy.sparse import coo_matrix, find
def completion_constraint(constraint_matrix, force = False):
"""
Complete the constraints matrix by
forcing consistency and transitive closure
NB: Matrix will be dense
Arguments:
        constraint_matrix {sparse array} -- Constraints on data points
+1 => Constraint the points to be in the same cluster
-1 => Constraint the points to be in separate clusters
Returns:
Completed constraint matrix {sparse array}
"""
constraint_matrix = constraint_matrix.todense()
assert np.array_equal(constraint_matrix.T, constraint_matrix)
# Transitive closure on positive constraint
    # Adapted Floyd–Warshall algorithm
positive = np.where(constraint_matrix > 0, constraint_matrix, np.zeros_like(constraint_matrix))
negative = np.where(constraint_matrix < 0, constraint_matrix, np.zeros_like(constraint_matrix))
notNull = np.unique(np.argwhere(constraint_matrix != 0))
for k in notNull:
for end, i in enumerate(notNull):
for j in notNull[:end]:
# Improved version for going faster
value = positive[i, k] * positive[k, j]
if positive[i, j] < value:
positive[i, j] = value
positive[j, i] = value
value = positive[i, k] * negative[k, j] + negative[i, k] * positive[k, j]
if negative[i, j] > value:
negative[i, j] = value
negative[j, i] = value
if not(force):
# Verify that no opposite constraint
assert np.sum(np.multiply(positive, constraint_matrix) < 0) == 0, "Transitive Closure breaks constraint (use force option to erase the less sure constraint)"
assert np.sum(np.multiply(negative, constraint_matrix) < 0) == 0, "Transitive Closure breaks constraint (use force option to erase the less sure constraint)"
# Take the most confident constraint
result = np.where(positive >= np.abs(constraint_matrix), positive, constraint_matrix)
result = np.where(np.abs(negative) >= np.abs(result), negative, result)
return coo_matrix(result)
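# Minimal usage sketch (illustrative, not from the original file): complete a tiny
# constraint matrix with one must-link (+1) and one cannot-link (-1) pair.
def _completion_constraint_demo():
    demo = np.zeros((3, 3))
    demo[0, 1] = demo[1, 0] = 1    # points 0 and 1 must be in the same cluster
    demo[1, 2] = demo[2, 1] = -1   # points 1 and 2 must be in different clusters
    completed = completion_constraint(coo_matrix(demo))
    # transitive closure implies points 0 and 2 cannot be linked either
    return completed.todense()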
def random_indices(list_points, number_indices):
"""
Generates a list of indices to apply on the constraint matrix
without redundancy
Arguments:
list_points {List of Int / Int} -- Number of points in dataset or list of points to take into account
number_indices {Int} -- Number of indices needed
Returns:
List of pairs of coordinates
"""
if isinstance(list_points, int):
list_points = np.arange(list_points)
length = len(list_points)
indices = set()
while len(indices) < number_indices:
        i = np.random.randint(length - 1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 18:28:54 2020
@author: Dr <NAME> (CIMAT-CONACYT, Mexico) jac at cimat.mx
Instantaneous reproduction numbers calculations.
Rts_P, Implementation of Cori et al (2013)
Rts_AR, new filtering version using an autoregressive linear model of Capistrán, Capella and Christen (2020):
https://arxiv.org/abs/2012.02168, 05DIC2021
01FEB2021: Some bugs were corrected to avoid errors when counts are too low and for prediction when g=1.
Go directly to __main__ for examples.
"""
import os
from datetime import date, timedelta
from pickle import load, dump
from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones
from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt
from numpy import sum as np_sum
from scipy.stats import erlang, gamma, nbinom, uniform, beta
from scipy.stats import t as t_student
from matplotlib.pyplot import subplots, rcParams, close
from matplotlib.dates import drange
from pytwalk import pytwalk
from plotfrozen import PlotFrozenDist
def Rts_P( data, tau=7, n=30, IP_dist=erlang( a=3, scale=8/3),\
Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90]):
"""Calculate Rt as in:
<NAME>, <NAME>, <NAME>, <NAME>,
A New Framework and Software to Estimate Time-Varying Reproduction Numbers
During Epidemics, American Journal of Epidemiology,
Volume 178, Issue 9, 1 November 2013, Pages 1505–1512,
https://doi.org/10.1093/aje/kwt133
data: array with case incidence.
tau: Use a window tau (default 7) to calculate R_{t,\tau}'s.
n: calculate n R_{t,\tau}'s to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t.
    q=[10,25,50,75,90], quantiles to use to calculate the posterior dist. for R_t.
    If q is a single integer, return a simulation of the Rts of size q, for each Rt.
Returns: a (len(q), n) array with quantiles of the R_{t,\tau}'s.
"""
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
rt = zeros(( len(q), n))
simulate = False
else: ## If q ia a single integer, return a simulation of the Rts of size q, for each Rt
if q == 2: # return a and b of post gamma
rt = zeros(( q, n))
else:
rt = zeros(( q, n))
simulate = True
m = len(data)
w = diff(IP_dist.cdf( arange( 0, m+1)))
w /= sum(w)
w = flip(w)
for t in range(max(m-n,0), m):
S1 = 0.0
S2 = 0.0
if sum(data[:t]) <= 10:# Only for more than 10 counts
continue
for k in range(tau):
I = data[:(t-k)] ## window of reports
S2 += data[(t-k)]
S1 += sum(I * w[(m-(t-k)):]) #\Gamma_k
#print( (Rt_pr_a+S2) * (1/(S1 + 1/Rt_pr_b)), (Rt_pr_a+S2), 1/(S1 + 1/Rt_pr_b))
if simulate:
if q == 2: #Return Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b)
rt[:,t-(m-n)] = Rt_pr_a+S2, 1/(S1 + 1/Rt_pr_b)
else:
rt[:,t-(m-n)] = gamma.rvs( Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b), size=q)
else:
rt[:,t-(m-n)] = gamma.ppf( q, Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b))
return rt
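# Minimal usage sketch (illustrative only; the original examples live in __main__):
# estimate R_t quantiles from a synthetic incidence series with an arbitrary growth rate.
def _rts_p_demo():
    from numpy.random import poisson
    incidence = poisson(50 * exp(0.03 * arange(60)))  # 60 days of synthetic counts
    rts = Rts_P(incidence, tau=7, n=30)               # shape (5, 30); rows follow q
    return rts[2]                                     # posterior medians, last 30 days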
def PlotRts_P( data_fnam, init_date, trim=0,\
tau=7, n=30, IP_dist=erlang( a=3, scale=8/3), Rt_pr_a=5, Rt_pr_b=5/5,\
q=[10,25,50,75,90], csv_fnam=None, color='blue', median_color='red', alpha=0.25, ax=None):
"""Makes a board with the Rt evolution for the past n days (n=30).
All parameters are passed to function Rts_P.
csv_fnam is an optional file name toi save the Rts info.
ax is an Axis hadle to for the plot, if None, it creates one and retruns it.
"""
if type(data_fnam) == str:
data = loadtxt(data_fnam)
else:
data = data_fnam.copy()
data_fnam = " "
if trim < 0:
data = data[:trim,:]
rts = Rts_P(data=data[:,1],\
tau=tau, n=n, IP_dist=IP_dist, q=q,\
Rt_pr_a=Rt_pr_a, Rt_pr_b=Rt_pr_b)
m = data.shape[0]
last_date = init_date + timedelta(m)
if ax == None:
fig, ax = subplots(figsize=( n/3, 3.5) )
for i in range(n):
h = rts[:,i]
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=alpha)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=alpha)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
ax.set_title(data_fnam + r", $R_t$, dist. posterior.")
ax.set_xlabel('')
ax.set_xticks(range(n))
ax.set_xticklabels([(last_date-timedelta(n-i)).strftime("%d.%m") for i in range(n)], ha='right')
ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax.axhline(y=1, color='green')
ax.axhline(y=2, color='red')
ax.axhline(y=3, color='darkred')
ax.set_ylim((0.5,3.5))
ax.set_yticks(arange( 0.4, 3.4, step=0.2))
ax.tick_params( which='major', axis='y', labelsize=10)
ax.grid(color='grey', linestyle='--', linewidth=0.5)
#fig.tight_layout()
if csv_fnam != None:
days = drange( last_date-timedelta(n), last_date, timedelta(days=1))
### To save all the data for the plot,
### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
### 0 1 2 3 4 5 6 7
sv = -ones(( len(days), 3+len(q)))
for i,day in enumerate(days):
d = date.fromordinal(int(day))
sv[ i, 0] = d.year
sv[ i, 1] = d.month
sv[ i, 2] = d.day
sv[ i, 3:] = rts[:,i]
q_str = ', '.join(["q_%02d" % (qunt,) for qunt in q])
savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
return ax
"""
def loglikelihood_NB( x, mu, psi):
mu_psi = mu/psi
return -gammaln(x + 1) + gammaln(x + psi) - gammaln(psi)\
-(x + psi)*log(1 + mu_psi) + x*log(mu_psi)
"""
def loglikelihood_NB( x, mu, psi):
return beta.logcdf(x, mu*psi, (1-mu)*psi)
def Rts_NB( data, n=30, tau=7, psi=10, IP_dist=erlang( a=3, scale=8/3),\
Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90]):
"""Calculate Rt Using a Negative Binomial instead of Poisson.
Here one needs to fix psi = 1/theta (= 10).
Extension of (not documented):
<NAME>, <NAME>, <NAME>, <NAME>,
A New Framework and Software to Estimate Time-Varying Reproduction Numbers
During Epidemics, American Journal of Epidemiology,
Volume 178, Issue 9, 1 November 2013, Pages 1505–1512,
https://doi.org/10.1093/aje/kwt133
data: array with case incidence.
tau: Use a window tau (default 7) to calculate R_{t,\tau}'s.
n: calculate n R_{t,\tau}'s to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t.
    q=[10,25,50,75,90], quantiles to use to calculate the posterior dist. for R_t.
    If q is a single integer, return a simulation of the Rts, for each Rt.
Returns: a (len(q), n) array with quantiles of the R_{t,\tau}'s.
"""
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
quantiles = zeros(len(q))
rt = zeros(( len(q), n))
simulate = False
else: ## If q ia a single integer, return a simulation of the Rts of size q, for each Rt
rt = zeros(( q, n))
simulate = True
m = len(data)
w = diff(IP_dist.cdf( arange( 0, m+1)))
w /= sum(w)
w = flip(w)
R = linspace( 0.1, 3.0, num=100)
DeltaR = R[1]-R[0]
#omega = 1
#theta = THETA_MEAN #0.01
#psi = 1/theta
#fig, axs = subplots(nrows=5, ncols=1, figsize=( 5, 5))
for t in range(max(m-n,0), m):
#S1 = 0.0
log_likelihood_I = zeros(R.shape) ## Same size of array for values for R
if sum(data[:t]) <= 10:# Only for more than 10 counts
continue
for k in range(tau):
I = data[:(t-k)] ## window of reports
Gammak = I @ w[(m-(t-k)):] #\Gamma_k
#S1 += Gammak
I_k = data[(t-k)]
log_likelihood_I += loglikelihood_NB( I_k, R*Gammak, psi)
log_post = log_likelihood_I + gamma.logpdf( R, Rt_pr_a, scale=1/Rt_pr_b)
pdf = exp(log_post)
pdf /= sum(pdf)*DeltaR
cdf = cumsum(pdf)*DeltaR
if simulate:
u = uniform.rvs()
rt[:,t-(m-n)] = R[where(cdf < u)[0][-1]]
else:
for i,qua in enumerate(q):
quantiles[i] = R[where(cdf < qua)[0][-1]]
rt[:,t-(m-n)] = quantiles
return rt
def PlotRts_NB( data_fnam, init_date, psi, trim=0,\
tau=7, n=30, IP_dist=erlang( a=3, scale=8/3), Rt_pr_a=5, Rt_pr_b=5/5,\
q=[10,25,50,75,90], csv_fnam=None, color='blue', ax=None):
"""Makes a board with the Rt evolution for the past n days (n=30).
All parameters are passed to function Rts_NB.
    csv_fnam is an optional file name to save the Rts info.
    ax is an Axes handle for the plot; if None, it creates one and returns it.
"""
if type(data_fnam) == str:
data = loadtxt(data_fnam)
else:
data = data_fnam.copy()
data_fnam = " "
if trim < 0:
data = data[:trim,:]
rts = Rts_NB(data=data[:,1],\
tau=tau, psi=psi, n=n, IP_dist=IP_dist, q=q,\
Rt_pr_a=Rt_pr_a, Rt_pr_b=Rt_pr_b)
m = data.shape[0]
last_date = init_date + timedelta(m)
if ax == None:
fig, ax = subplots(figsize=( n/3, 3.5) )
for i in range(n):
h = rts[:,i]
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=0.25)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=0.25)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color='red' )
ax.set_title(data_fnam + r", $R_t$, dist. posterior.")
ax.set_xlabel('')
ax.set_xticks(range(n))
ax.set_xticklabels([(last_date-timedelta(n-i)).strftime("%d.%m") for i in range(n)], ha='right')
ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax.axhline(y=1, color='green')
ax.axhline(y=2, color='red')
ax.axhline(y=3, color='darkred')
ax.set_ylim((0.5,3.5))
ax.set_yticks(arange( 0.4, 3.4, step=0.2))
ax.tick_params( which='major', axis='y', labelsize=10)
ax.grid(color='grey', linestyle='--', linewidth=0.5)
#fig.tight_layout()
if csv_fnam != None:
days = drange( last_date-timedelta(n), last_date, timedelta(days=1))
### To save all the data for the plot,
### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
### 0 1 2 3 4 5 6 7
sv = -ones(( len(days), 3+len(q)))
for i,day in enumerate(days):
d = date.fromordinal(int(day))
sv[ i, 0] = d.year
sv[ i, 1] = d.month
sv[ i, 2] = d.day
sv[ i, 3:] = rts[:,i]
q_str = ', '.join(["q_%02d" % (qunt,) for qunt in q])
savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
return ax
class Rts_NB_psi:
def __init__( self, data_fnam, init_date, trim=0, tau=7, n=30, IP_dist=erlang( a=3, scale=8/3),\
Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90], workdir="./../"):
"""Calculate Rt Using a Negative Binomial with unknown psi = 1/theta.
Here one needs to run the MCMC first, RunMCMC.
See example below.
Extension of (not documented):
<NAME>, <NAME>, <NAME>, <NAME>,
A New Framework and Software to Estimate Time-Varying Reproduction Numbers
During Epidemics, American Journal of Epidemiology,
Volume 178, Issue 9, 1 November 2013, Pages 1505–1512,
https://doi.org/10.1093/aje/kwt133
data: array with case incidence.
tau: Use a window tau (default 7) to calculate R_{t,\tau}'s.
n: calculate n R_{t,\tau}'s to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t.
        q=[10,25,50,75,90], quantiles to use to calculate the posterior dist. for R_t.
        If q is a single integer, return a simulation of the Rts of size q, for each Rt.
"""
self.data_fnam = data_fnam
data = loadtxt(workdir + 'data/' + data_fnam + '.csv')
self.workdir = workdir
if trim < 0:
self.data = data[:trim,1]
else:
self.data = data[:,1]
#convolve
self.init_date = init_date
self.m = len(data)
self.IP_dist = IP_dist
self.w = diff(IP_dist.cdf( arange( 0, self.m+1)))
self.w /= sum(self.w)
self.w = flip(self.w)
self.n = min(self.m, n)
self.tau = tau
self.Rt_pr_a = Rt_pr_a
self.Rt_pr_b = Rt_pr_b
self.prior = gamma( self.Rt_pr_a, scale=1/self.Rt_pr_b)
#omega = 1
self.psi = 100
self.psi_prior = gamma( 3, scale=self.psi/3)
for t in range( self.m - self.n, self.m):
if sum(self.data[:t]) <= 10:# Rt calculated only for more than 10 counts
print("Not more than 10 counts for day %d" % (-t,))
self.n -= 1
self.Gammak = zeros(self.m) ##We calculate all gammas previously:
for s in range(self.m):
self.Gammak[s] = self.data[:s] @ self.w[(self.m-s):] #\Gamma_k
if os.path.isfile(workdir + 'output/' + self.data_fnam + '_rts.pkl'): # samples file exists
print("File with rts and psi samples exists, loading rts ...", end=' ')
self.rts = load(open(workdir + 'output/' + self.data_fnam + '_rts.pkl', 'rb'))
self.psi_samples = load(open(workdir + 'output/' + self.data_fnam + '_rts_psi.pkl', 'rb'))
else:
print("File with rts and psi samples does not exist, run RunMCMC first.")
def logpost( self, Rs, psi):
log_post = 0.0
for t in range( self.m - self.n, self.m):
log_post += self.prior.logpdf( Rs[t-(self.m - self.n)]) +\
                np_sum(loglikelihood_NB( self.data[(t-self.tau+1):t], Rs[t-(self.m - self.n)]*self.Gammak[(t-self.tau+1):t], psi))
#log_post += sum([loglikelihood_NB( self.data[s], Rs[t-(self.m - self.n)]*self.Gammak[s], psi) for s in range( t-self.tau+1, t)])
"""
for k in range(self.tau):
s = t-k
#I = self.data[:s] ## window of reports
#Gammak = self.data[:s] @ self.w[(self.m-s):] #\Gamma_k
#I_k = self.data[s]
log_post += loglikelihood_NB( self.data[s], Rs[t-(self.m - self.n)]*self.Gammak[s], psi)
log_post += self.prior.logpdf( Rs[t-(self.m - self.n)])
"""
return log_post
def sim_init(self):
"""Simulate initial values from the Rts_NB and the prior for psi."""
# Shake the Rts_NB simulation to avoid repeated values
#shake = Rts_NB( self.data*self.Z, tau=self.tau, n=self.n, IP_dist=self.IP_dist,\
# Rt_pr_a=self.Rt_pr_a, Rt_pr_b=self.Rt_pr_b, q=1) + 0.001*uniform.rvs(size=self.n)
shake = ones(self.n) + 0.001*uniform.rvs(size=self.n)
return append( shake, self.psi_prior.rvs(size=1))
#Simulate intial values from the prior.
#return append(self.prior.rvs(size=self.n),self.psi_prior.rvs(size=1))
def support(self, x):
rt = all( (0.1 <= x[:-1]) * (x[:-1] <= 40) ) #Rt's
rt &= (x[-1] > 0.0)
return rt
def RunMCMC( self, T, burnin=5000, q=[10,25,50,75,90]):
"""Run twalk MCMC, T = number of iterations.
        burnin, thinning = IAT.
"""
#self.twalk = pytwalk(n = self.n+1, U=lambda x: -self.logpost( x[:-1], self.psi), Supp =self.support) #Ignore x[-1] = psi
self.twalk = pytwalk(n = self.n+1, U=lambda x: -self.logpost( x[:-1], x[-1]) - self.prior.logpdf(x[-1]), Supp =self.support)
self.twalk.Run( T=T, x0 = self.sim_init(), xp0 = self.sim_init())
self.burnin = burnin
self.Rts(q=q)
dump( self.rts, open(self.workdir + 'output/' + self.data_fnam + '_rts.pkl', 'wb'))
self.psi_samples = self.twalk.Output[self.burnin:, self.n]
dump( self.psi_samples, open(self.workdir + 'output/' + self.data_fnam + '_rts_psi.pkl', 'wb'))
def PlotPostPsi( self, ax=None):
if ax == None:
fig, ax = subplots(figsize=( 5,5) )
PlotFrozenDist(self.psi_prior, color='green', ax=ax)
ax.hist( self.psi_samples, density=True)
ax.set_xlabel(r'$\psi$')
def PlotPostRt( self, i, ax=None):
if ax == None:
fig, ax = subplots(figsize=( 5,5) )
#PlotFrozenDist(self.psi_prior, color='green', ax=ax)
ax.hist( self.twalk.Output[self.burnin:,i], density=True)
ax.set_xlabel(r'$R_%d$' % (i))
def Rts( self, q=[10,25,50,75,90]):
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
rts = zeros(( len(q), self.n))
simulate = False
else: ## If q ia a single integer, return a simulation of the Rts of size q, for each Rt
rts = zeros(( q, self.n))
simulate = True
self.q = q
self.simulate = simulate
#fig, axs = subplots(nrows=5, ncols=1, figsize=( 5, 5))
for i in range(self.n):
if simulate:
#u = uniform.rvs()
rts[:,i] = self.twalk.Output[self.burnin+0,i]
else:
rts[:,i] = quantile( self.twalk.Output[self.burnin:,i], q=q)
self.rts = rts
return rts
def PlotRts( self, color='blue', median_color='red', csv_fnam=None, ax=None):
"""Makes a board with the Rt evolution.
csv_fnam is an optional file name to save the Rts info.
        ax is an Axes handle for the plot; if None, it creates one and returns it.
"""
#self.rts already been produced after running RunMCMC
last_date = self.init_date + timedelta(self.m)
if ax == None:
fig, ax = subplots(figsize=( self.n/3, 3.5) )
for i in range(self.n):
h = self.rts[:,i]
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=0.25)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=0.25)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
ax.set_title(self.data_fnam + r", $R_t$, dist. posterior.")
ax.set_xlabel('')
ax.set_xticks(range(self.n))
ax.set_xticklabels([(last_date-timedelta(self.n-i)).strftime("%d.%m") for i in range(self.n)], ha='right')
ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax.axhline(y=1, color='green')
ax.axhline(y=2, color='red')
ax.axhline(y=3, color='darkred')
ax.set_ylim((0.5,3.5))
ax.set_yticks(arange( 0.4, 3.4, step=0.2))
ax.tick_params( which='major', axis='y', labelsize=10)
ax.grid(color='grey', linestyle='--', linewidth=0.5)
#fig.tight_layout()
if csv_fnam != None:
days = drange( last_date-timedelta(self.n), last_date, timedelta(days=1))
### To save all the data for the plot,
### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
### 0 1 2 3 4 5 6 7
sv = -ones(( len(days), 3+len(self.q)))
for i,day in enumerate(days):
d = date.fromordinal(int(day))
sv[ i, 0] = d.year
sv[ i, 1] = d.month
sv[ i, 2] = d.day
sv[ i, 3:] = self.rts[:,i]
q_str = ', '.join(["q_%02d" % (qunt,) for qunt in self.q])
savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
return ax
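# Minimal usage sketch for Rts_NB_psi (the file name, date and iteration counts below are
# assumptions, not taken from the original __main__ examples):
#
#     rts_nb = Rts_NB_psi("my_region", init_date=date(2020, 2, 27), workdir="./../")
#     rts_nb.RunMCMC(T=50000, burnin=5000)   # sample the Rt's and psi with the t-walk
#     rts_nb.PlotRts()                       # board with the posterior Rt quantiles
#     rts_nb.PlotPostPsi()                   # posterior of the overdispersion parameter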
class Rts_AR:
def __init__( self, data_fnam, init_date, trim=0,\
IP_dist=erlang( a=3, scale=8/3), tau=7, m0=0, c_a_0=1, w_a_t=2/7, n0=2, s0=3,\
n=30, pred=0, workdir="./../"):
"""Calculate Rt Using a log autoregressive time series on the logs.
See: ...
See example below.
Parameters:
data_fnam: file name = workdir + 'data/' + data_fnam + '.csv'
or array with case incidence.
        init_date: initial date for first datum, e.g. date(2020, 2, 27).
trim: (negative) cut trim days at the end of data.
        tau: number of days to learn from the past (default 7, see paper).
n: calculate n R_t's to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
m0=0, c_a_0=1, w_a_t=0.25, n0=2, s0=3, m_0, c_0^*, w_t^*, n_0 prior
hyperparameters (see paper).
"""
self.data_fnam = data_fnam
data = loadtxt(workdir + 'data/' + data_fnam + '.csv')
self.workdir = workdir
if trim < 0:
self.data = data[:trim,1]
else:
self.data = data[:,1]
self.init_date = init_date
self.m = len(self.data) ##Data size
### Calculate the serial time distribution
self.IP_dist = IP_dist
self.w = diff(IP_dist.cdf( arange( 0, self.m+1)))
self.w /= sum(self.w)
self.w = flip(self.w)
### Calculation range
        self.shift = 5*tau # Number of days to start the calculation before the first Rt.
self.n = min(self.m, n) #Number of Rt's to calculate, from the present into the past.
self.N = n+self.shift #Total range (into the past) for calculation
#If self.N is larger than the whole data set
if self.N > (self.m-1):
self.n -= self.N - (self.m-1)#Reduce self.n accordingly
self.N = n+self.shift
if self.n < 0:
raise ValueError("ERROR: Not enough data to calculate Rts: 5*tau > %d (data size)" % (self.m,))
print("Not enough data to calculate Rts: 5*tau + n > %d (data size)" % (self.m,))
print("Reducing to n=%d" % (self.n,))
for t in range(self.n):
if self.data[self.m-(self.n - t)] >= 10:
break
else:
self.n -= 1 #Reduce n if the counts have not reached 10
print("Incidence below 10, reducing n to %d." % (self.n,))
self.N = self.n+self.shift
### Setting prior parameters
self.delta = 1-(1/tau)
self.tau = tau
self.pred = pred
self.g = 1 #exp(-2/tau)
self.m0 = m0
self.c_a_0 = c_a_0
self.w_a_t = w_a_t
self.n0 = n0
self.s0 = s0
"""
### Calculation range
for t in range( self.m - self.N, self.m):
if sum(self.data[:t]) <= 10:# Rt calculated only for more than 10 counts
print("Not more than 10 counts for day %d" % (-t,))
self.n -= 1
self.N = min(self.m, n+self.shift)
"""
### We calculate all gammas previously:
self.Gammak = zeros(self.m)
for s in range(self.m):
self.Gammak[s] = self.data[:s] @ self.w[(self.m-s):] #\Gamma_k
### Calculate the log data:
        ### We add 1e-6 for convenience, since very early data may be zero.
        ### This makes no difference at the end.
self.y = log(self.data + 1e-6) - log(self.Gammak + 1e-6)
def sim_data( self, R, I0):
pass
def CalculateRts( self, q=[10,25,50,75,90]):
"""Calculate the posterior distribution and the Rt's quantiles.
        q=[10,25,50,75,90], quantiles used to summarize the posterior dist. for each R_t.
        If q is a single integer, return a simulation of the Rts of size q, for each Rt.
If q=2, save the mean and dispersion parameter of the posterior for Rt
"""
if isinstance( q, list): ## Return a list of quantiles
            q = array(q)/100
"""
This module contains geoscience-related transfer functions whose use is completely optional.
"""
from __future__ import division
import numpy as np
import datashader.transfer_functions as tf
from datashader import Canvas
from datashader.colors import rgb
from datashader.utils import ngjit, lnglat_to_meters # noqa (API import)
from xarray import DataArray
__all__ = ['mean', 'binary', 'slope', 'aspect', 'ndvi', 'hillshade', 'generate_terrain',
'lnglat_to_meters']
def hillshade(agg, azimuth=225, angle_altitude=25):
"""Illuminates 2D DataArray from specific azimuth and altitude.
Parameters
----------
agg : DataArray
    angle_altitude : int, optional (default: 25)
        Altitude angle of the sun specified in degrees.
    azimuth : int, optional (default: 225)
        The angle between the north vector and the perpendicular projection
        of the light source down onto the horizon specified in degrees.
    Returns
    -------
    DataArray of illumination values in the range [0, 1], carrying the input's attrs.
Notes:
------
Algorithm References:
- http://geoexamples.blogspot.com/2014/03/shaded-relief-images-using-gdal-python.html
"""
azimuth = 360.0 - azimuth
x, y = np.gradient(agg.data)
slope = np.pi/2. - np.arctan(np.sqrt(x*x + y*y))
aspect = np.arctan2(-x, y)
azimuthrad = azimuth*np.pi/180.
altituderad = angle_altitude*np.pi/180.
shaded = np.sin(altituderad) * np.sin(slope) + np.cos(altituderad) * np.cos(slope)*np.cos((azimuthrad - np.pi/2.) - aspect)
data = (shaded + 1) / 2
return DataArray(data, attrs=agg.attrs)
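# A small synthetic check of hillshade() above; the ramp elevation and its attrs are
# illustrative, not part of the original module:
def _example_hillshade():
    elev = DataArray(np.outer(np.linspace(0, 1, 50), np.linspace(0, 1, 50)),
                     dims=['y', 'x'], attrs={'res': 1})
    return hillshade(elev, azimuth=225, angle_altitude=25)  # values in [0, 1]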
@ngjit
def _horn_slope(data, cellsize):
out = np.zeros_like(data)
rows, cols = data.shape
for y in range(1, rows-1):
for x in range(1, cols-1):
a = data[y+1, x-1]
b = data[y+1, x]
c = data[y+1, x+1]
d = data[y, x-1]
f = data[y, x+1]
g = data[y-1, x-1]
h = data[y-1, x]
i = data[y-1, x+1]
dz_dx = ((c + 2 * f + i) - (a + 2 * d + g)) / (8 * cellsize)
dz_dy = ((g + 2 * h + i) - (a + 2 * b + c)) / (8 * cellsize)
p = (dz_dx * dz_dx + dz_dy * dz_dy) ** .5
out[y, x] = np.arctan(p) * 57.29578
return out
def slope(agg):
"""Returns slope of input aggregate in degrees.
Parameters
----------
agg : DataArray
Returns
-------
data: DataArray
Notes:
------
Algorithm References:
- http://desktop.arcgis.com/en/arcmap/10.3/tools/spatial-analyst-toolbox/how-slope-works.htm
- <NAME>., and <NAME>., 1998. Principles of Geographical Information Systems (Oxford University Press, New York), pp 406
"""
if not isinstance(agg, DataArray):
raise TypeError("agg must be instance of DataArray")
if not agg.attrs.get('res'):
        # TODO: defaulting the "res" attribute to 1 (unity) here might be reasonable
raise ValueError('input xarray must have numeric `res` attr.')
slope_agg = _horn_slope(agg.data, agg.attrs['res'])
return DataArray(slope_agg,
name='slope',
coords=agg.coords,
dims=agg.dims,
attrs=agg.attrs)
@ngjit
def _ndvi(nir_data, red_data):
out = np.zeros_like(nir_data)
rows, cols = nir_data.shape
for y in range(0, rows):
for x in range(0, cols):
nir = nir_data[y, x]
red = red_data[y, x]
            if nir == red:  # covers the zero-division case
continue
soma = nir + red
out[y, x] = (nir - red) / soma
return out
def ndvi(nir_agg, red_agg):
"""Returns Normalized Difference Vegetation Index (NDVI).
Parameters
----------
nir_agg : DataArray
near-infrared band data
red_agg : DataArray
red band data
Returns
-------
data: DataArray
Notes:
------
Algorithm References:
- http://ceholden.github.io/open-geo-tutorial/python/chapter_2_indices.html
"""
if not isinstance(nir_agg, DataArray):
raise TypeError("nir_agg must be instance of DataArray")
if not isinstance(red_agg, DataArray):
raise TypeError("red_agg must be instance of DataArray")
if not red_agg.shape == nir_agg.shape:
raise ValueError("red_agg and nir_agg expected to have equal shapes")
return DataArray(_ndvi(nir_agg.data, red_agg.data),
attrs=nir_agg.attrs)
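# Quick sanity sketch for ndvi() above with two tiny synthetic bands (the reflectance
# values are illustrative only):
def _example_ndvi():
    nir = DataArray(np.array([[0.8, 0.6], [0.7, 0.5]]), dims=['y', 'x'])
    red = DataArray(np.array([[0.1, 0.2], [0.3, 0.5]]), dims=['y', 'x'])
    return ndvi(nir, red)  # values in [-1, 1]; pixels with nir == red stay 0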
@ngjit
def _horn_aspect(data):
out = np.zeros_like(data)
rows, cols = data.shape
for y in range(1, rows-1):
for x in range(1, cols-1):
a = data[y+1, x-1]
b = data[y+1, x]
c = data[y+1, x+1]
d = data[y, x-1]
f = data[y, x+1]
g = data[y-1, x-1]
h = data[y-1, x]
i = data[y-1, x+1]
dz_dx = ((c + 2 * f + i) - (a + 2 * d + g)) / 8
dz_dy = ((g + 2 * h + i) - (a + 2 * b + c)) / 8
aspect = np.arctan2(dz_dy, -dz_dx) * 57.29578 # (180 / pi)
if aspect < 0:
out[y, x] = 90.0 - aspect
elif aspect > 90.0:
out[y, x] = 360.0 - aspect + 90.0
else:
out[y, x] = 90.0 - aspect
return out
def aspect(agg):
"""Returns downward slope direction in compass degrees (0 - 360) with 0 at 12 o'clock.
Parameters
----------
agg : DataArray
Returns
-------
data: DataArray
Notes:
------
Algorithm References:
- http://desktop.arcgis.com/en/arcmap/10.3/tools/spatial-analyst-toolbox/how-aspect-works.htm#ESRI_SECTION1_4198691F8852475A9F4BC71246579FAA
- <NAME>., and <NAME>., 1998. Principles of Geographical Information Systems (Oxford University Press, New York), pp 406
"""
if not isinstance(agg, DataArray):
raise TypeError("agg must be instance of DataArray")
return DataArray(_horn_aspect(agg.data),
dims=['y', 'x'],
attrs=agg.attrs)
def color_values(agg, color_key, alpha=255):
def _convert_color(c):
r, g, b = rgb(c)
return np.array([r, g, b, alpha]).astype(np.uint8).view(np.uint32)[0]
_converted_colors = {k: _convert_color(v) for k, v in color_key.items()}
f = np.vectorize(lambda v: _converted_colors.get(v, 0))
return tf.Image(f(agg.data))
@ngjit
def _binary(data, values):
out = np.zeros_like(data)
rows, cols = data.shape
for x in range(0, rows):
for y in range(0, cols):
if data[y, x] in values:
out[y, x] = True
else:
out[y, x] = False
return out
def binary(agg, values):
return DataArray(_binary(agg.data, values),
dims=['y', 'x'],
attrs=agg.attrs)
@ngjit
def _mean(data, excludes):
out = np.zeros_like(data)
rows, cols = data.shape
for y in range(1, rows-1):
for x in range(1, cols-1):
exclude = False
for ex in excludes:
if data[y,x] == ex:
exclude = True
break
if not exclude:
a,b,c,d,e,f,g,h,i = [data[y-1, x-1], data[y, x-1], data[y+1, x-1],
data[y-1, x], data[y, x], data[y+1, x],
data[y-1, x+1], data[y, x+1], data[y+1, x+1]]
out[y, x] = (a+b+c+d+e+f+g+h+i) / 9
else:
out[y, x] = data[y, x]
return out
def mean(agg, passes=1, excludes=[np.nan]):
"""
Returns Mean filtered array using a 3x3 window
Parameters
----------
agg : DataArray
passes : int, number of times to run mean
Returns
-------
data: DataArray
"""
out = None
for i in range(passes):
if out is None:
out = _mean(agg.data, excludes)
else:
out = _mean(out, excludes)
return DataArray(out, dims=['y', 'x'], attrs=agg.attrs)
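# Short sketch of mean() above: two passes of the 3x3 box filter over a random aggregate
# (the synthetic input is illustrative only):
def _example_mean_filter():
    noisy = DataArray(np.random.rand(64, 64), dims=['y', 'x'], attrs={'res': 1})
    return mean(noisy, passes=2)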
def generate_terrain(canvas, seed=10, zfactor=4000, full_extent='3857'):
"""
Generates a pseudo-random terrain which can be helpful for testing raster functions
Parameters
----------
canvas : ds.Canvas instance for passing output dimensions / ranges
seed : seed for random number generator
    zfactor : multiplier applied to the z values
    full_extent : str wkid ('3857'), tuple of 4 extent values, or None (defaults to Web Mercator)
Returns
-------
terrain: DataArray
Notes:
------
Algorithm References:
- This was inspired by Michael McHugh's 2016 PyCon Canada talk:
https://www.youtube.com/watch?v=O33YV4ooHSo
- https://www.redblobgames.com/maps/terrain-from-noise/
"""
def _gen_heights(bumps):
out = np.zeros(len(bumps))
for i, b in enumerate(bumps):
x = b[0]
y = b[1]
val = agg.data[y, x]
if val >= 0.33 and val <= 3:
out[i] = 0.1
return out
def _scale(value, old_range, new_range):
return ((value - old_range[0]) / (old_range[1] - old_range[0])) * (new_range[1] - new_range[0]) + new_range[0]
if not isinstance(canvas, Canvas):
raise TypeError('canvas must be instance type datashader.Canvas')
mercator_extent = (-np.pi * 6378137, -np.pi * 6378137, np.pi * 6378137, np.pi * 6378137)
crs_extents = {'3857': mercator_extent}
if isinstance(full_extent, str):
full_extent = crs_extents[full_extent]
elif full_extent is None:
full_extent = mercator_extent
    elif not (isinstance(full_extent, (list, tuple)) and len(full_extent) == 4):
raise TypeError('full_extent must be tuple(4) or str wkid')
full_xrange = (full_extent[0], full_extent[2])
full_yrange = (full_extent[1], full_extent[3])
x_range_scaled = (_scale(canvas.x_range[0], full_xrange, (0, 1)),
_scale(canvas.x_range[1], full_xrange, (0, 1)))
y_range_scaled = (_scale(canvas.y_range[0], full_yrange, (0, 1)),
_scale(canvas.y_range[1], full_yrange, (0, 1)))
data = _gen_terrain(canvas.plot_width, canvas.plot_height, seed,
x_range=x_range_scaled, y_range=y_range_scaled)
data = (data - np.min(data))/np.ptp(data)
data[data < 0.3] = 0 # create water
data *= zfactor
xs = np.linspace(canvas.x_range[0], canvas.x_range[1], canvas.plot_width, endpoint=False)
ys = np.linspace(canvas.y_range[0], canvas.y_range[1], canvas.plot_height, endpoint=False)
agg = DataArray(data,
coords=dict(x=xs, y=ys),
dims=['y', 'x'],
attrs={'res':1})
return agg
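# End-to-end sketch tying the pieces above together: build a terrain on a Canvas, then
# derive shaded relief and slope from it (the extents are illustrative Web Mercator
# coordinates inside the default full_extent):
def _example_terrain_pipeline():
    cvs = Canvas(plot_width=300, plot_height=300,
                 x_range=(-2e7, 2e7), y_range=(-2e7, 2e7))
    terrain = generate_terrain(cvs, seed=10, zfactor=4000)
    return hillshade(terrain), slope(terrain)  # slope() works since attrs={'res': 1}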
def _gen_terrain(width, height, seed, x_range=None, y_range=None):
if not x_range:
x_range = (0, 1)
if not y_range:
y_range = (0, 1)
# multiplier, (xfreq, yfreq)
NOISE_LAYERS= ((1 / 2**i, (2**i, 2**i)) for i in range(16))
linx = np.linspace(x_range[0], x_range[1], width, endpoint=False)
liny = np.linspace(y_range[0], y_range[1], height, endpoint=False)
x, y = np.meshgrid(linx, liny)
height_map = None
for i, (m, (xfreq, yfreq)) in enumerate(NOISE_LAYERS):
noise = _perlin(x * xfreq, y * yfreq, seed=seed + i) * m
if height_map is None:
height_map = noise
else:
height_map += noise
height_map /= (1.00 + 0.50 + 0.25 + 0.13 + 0.06 + 0.03)
height_map = height_map ** 3
return height_map
def bump(width, height, count=None, height_func=None, spread=1):
"""
Generate a simple bump map
Parameters
----------
width : int
height : int
count : int (defaults: w * h / 10)
height_func : function which takes x, y and returns a height value
spread : tuple boundaries
Returns
-------
bumpmap: DataArray
Notes:
------
Algorithm References:
- http://www.mountaincartography.org/mt_hood/pdfs/nighbert_bump1.pdf
"""
linx = range(width)
liny = range(height)
if count is None:
count = width * height // 10
if height_func is None:
height_func = lambda bumps: np.ones(len(bumps))
bump_xs = np.random.choice(linx, count).tolist()
bump_ys = np.random.choice(liny, count).tolist()
locs = list(zip(bump_xs, bump_ys))
heights = height_func(locs)
bumps = _finish_bump(width, height, locs, heights, spread)
return DataArray(bumps, dims=['y', 'x'], attrs=dict(res=1))
@ngjit
def _finish_bump(width, height, locs, heights, spread):
out = np.zeros((height, width))
rows, cols = out.shape
s = spread ** 2 # removed sqrt for perf.
for i in range(len(heights)):
x = locs[i][0]
y = locs[i][1]
z = heights[i]
out[y, x] = out[y, x] + z
if s > 0:
for nx in range(max(x - spread, 0), min(x + spread, width)):
for ny in range(max(y - spread, 0), min(y + spread, height)):
d2 = (nx - x) * (nx - x) + (ny - y) * (ny - y)
if d2 <= s:
out[ny, nx] = out[ny,nx] + (out[y, x] * (d2 / s))
return out
def perlin(width, height, freq=(1, 1), seed=5):
"""
Generate perlin noise aggregate
Parameters
----------
width : int
height : int
freq : tuple of (x, y) frequency multipliers
seed : int
Returns
-------
bumpmap: DataArray
Notes:
------
Algorithm References:
- numba-ized from Paul Panzer example available here:
https://stackoverflow.com/questions/42147776/producing-2d-perlin-noise-with-numpy
- http://www.mountaincartography.org/mt_hood/pdfs/nighbert_bump1.pdf
"""
linx = range(width)
liny = range(height)
linx = np.linspace(0, 1, width, endpoint=False)
liny = np.linspace(0, 1, height, endpoint=False)
x, y = np.meshgrid(linx, liny)
data = _perlin(x * freq[0], y * freq[1], seed=seed)
data = (data - np.min(data))/np.ptp(data)
return DataArray(data, dims=['y', 'x'], attrs=dict(res=1))
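# Minimal sketch combining perlin() with hillshade(): the Perlin aggregate (values
# normalised to [0, 1]) doubles as a toy elevation field for the shading functions above.
def _example_perlin_shading():
    noise = perlin(width=256, height=256, freq=(4, 4), seed=7)
    return hillshade(noise)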
@ngjit
def _lerp(a, b, x):
return a + x * (b-a)
@ngjit
def _fade(t):
return 6 * t**5 - 15 * t**4 + 10 * t**3
@ngjit
def _gradient(h, x, y):
    vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])
import matplotlib.pyplot as plt
import numpy as np
import math
from mpl_toolkits.mplot3d import Axes3D
from scipy import integrate
from pylab import *
C = 299792.458
PI = np.pi
def sigma_sum(start, end, expression):
return sum(expression(i) for i in range(start, end, 2))
def fourier_series(x, f, n=0):
"""
Returns a symbolic fourier series of order `n`.
:param n: Order of the fourier series.
:param x: Independent variable
:param f: Frequency of the fourier series
"""
from symfit import parameters, variables, sin, cos, Fit
# Make the parameter objects for all the terms
a0, *cos_a = parameters(','.join(['a{}'.format(i) for i in range(0, n + 1)]))
sin_b = parameters(','.join(['b{}'.format(i) for i in range(1, n + 1)]))
# Construct the series
series = a0 + sum(ai * cos(i * f * x) + bi * sin(i * f * x)
for i, (ai, bi) in enumerate(zip(cos_a, sin_b), start=1))
return series
def beamforming(d_ant, freq, arr_a, arr_p):
"""
Returns beamforming result as power and phase.
:param d_ant: antenna unit interval as mm.
:param freq: Frequency as MHz
:param arr_a: weight list of amplitude as numpy array
:param arr_p: weight list of phase as numpy array
"""
if len(arr_a) == len(arr_p):
theta_grid = 360
arr_theta = np.arange(-PI, PI, 2 * PI/theta_grid)
        # element pattern + antenna gain
arr_elm = -np.minimum(12 * ((arr_theta/(90 / 180 * PI)) ** 2), 30) + 12
arr_re, arr_im = np.zeros(theta_grid), np.zeros(theta_grid)
arr_idx = np.arange(0, len(arr_a))
d = d_ant/C*freq
for iii in range(theta_grid):
delta_p = d * np.sin(arr_theta[iii]) * PI * 2
arr_re[iii] = sum(arr_a * np.cos(arr_p / 180 * PI + arr_idx * delta_p))
arr_im[iii] = sum(arr_a * np.sin(arr_p / 180 * PI + arr_idx * delta_p))
arr_c = np.vectorize(complex)(arr_re, arr_im)
arr_pwr = np.maximum(10 * np.log10(arr_c ** 2) + arr_elm, np.zeros(arr_c.shape))
return arr_theta, arr_pwr
def demo_IQ_modulation_demodulation(period=3, weight=1):
    print('_____Demo: I/Q data generation and demodulation_____')
    plt.rcParams['font.sans-serif'] = ['SimHei']  # needed to render CJK labels correctly
    plt.rcParams['axes.unicode_minus'] = False  # needed to render minus signs correctly
t = np.linspace(0, 2 * np.pi * period, 1000)
dt = t[1] - t[0]
    # IQ input
    a, b = 1.68, -1.45
    print('IQ signal input:', a, b)
    # IQ generation
y_cos, y_sin = a * np.cos(weight * t), -b * np.sin(weight * t)
y_iq = y_cos + y_sin
plt.figure(figsize=(8, 4))
plt.subplot(211)
plt.plot(t, y_cos, 'b--', label=r'$ cos ωt $', linewidth=1)
plt.plot(t, y_sin, 'g--', label=r'$ -sin ωt $', linewidth=1)
plt.plot(t, y_iq, label=r'$ IQ $', color='red', linewidth=1)
plt.legend(loc='upper right')
    # IQ demodulation
    y_i, y_q = y_iq * np.cos(weight * t), y_iq * (-np.sin(weight * t))
    # demodulated output
    demo_a, demo_b = np.sum(y_i * dt)/period/np.pi, np.sum(y_q * dt)/period/np.pi
    print('IQ signal demodulated:', demo_a, demo_b)
plt.subplot(212)
plt.plot(t, y_i, 'b--', label=r'$ I $', linewidth=1)
plt.plot(t, y_q, 'g--', label=r'$ Q $', linewidth=1)
plt.plot(t, y_iq, label=r'$ IQ $', color='red', linewidth=1)
# plt.plot(t, y_iq, label=r'$\cos ωt - sin ωt$', color='red', linewidth=1)
plt.xlabel('Time(s)')
plt.ylabel('amplitude')
# plt.title('A Sample Example')
# plt.ylim(-2, 2)
# plt.xlim(0, 10)
plt.legend(loc='upper right')
plt.show()
def demo_rectangular_wave_fourier_series(period=3, sigma_lv=5):
figure(figsize=(20, 6), dpi=80)
shift_t, shift_p = 0, 0
x = np.linspace(0, 2 * np.pi * period, 2048)
y = shift_p + sigma_sum(1, sigma_lv*2 + 1, lambda i: pow(-1, int(i/2)) / i * np.cos(i * (x + np.pi * shift_t)))
plt.plot(x, y)
plt.show()
def demo_rectangular_wave_fourier_series_3d(period=3, sigma_lv=5):
shift_t, shift_p = 0, 0
x = np.linspace(0, 2 * np.pi * period, 2048)
y = shift_p + sigma_sum(1, sigma_lv*2 + 1, lambda i: pow(-1, int(i/2)) / i * np.cos(i * (x + np.pi * shift_t)))
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(111, projection='3d')
z = np.zeros(x.shape)
plt.plot(x, z, y)
    # harmonic components
for iii in range(1, sigma_lv*2 + 1):
y = pow(-1, int(iii/2)) / iii * np.cos(iii * (x + np.pi * shift_t))
z = np.ones(x.shape) * 2 * iii + np.ones(x.shape) * 2
plt.plot(x, z, y)
plt.xlabel('Time')
plt.ylabel('Frequency')
plt.show()
def demo_fourier_series():
from symfit import parameters, variables, sin, cos, Fit
x, y = variables('x, y')
w, = parameters('w')
model_dict = {y: fourier_series(x, f=w, n=3)}
print(model_dict)
# Make step function file_H
xdata = np.linspace(-np.pi, np.pi)
ydata = np.zeros_like(xdata)
ydata[xdata > 0] = 1
# Define a Fit object for this model and file_H
fit = Fit(model_dict, x=xdata, y=ydata)
fit_result = fit.execute()
print(fit_result)
# Plot the result
plt.plot(xdata, ydata)
plt.plot(xdata, fit.model(x=xdata, **fit_result.params).y, color='green', ls=':')
plt.show()
def demo_rotation_vector(end=50):
fig = plt.figure()
ax1 = Axes3D(fig)
zt = np.linspace(0, end, end * 20)
xc = np.cos(zt)
ys = np.sin(zt)
ax1.plot3D([0, end], [0, 0], [0, 0])
ax1.plot3D([end, end], [0, list(xc)[-1]], [0, list(ys)[-1]])
ax1.plot3D(zt, xc, ys)
ax1.set_xlabel('Time', color='g', fontsize='14')
ax1.set_ylabel('real°', color='b', fontsize='14')
ax1.set_zlabel('image', color='r', fontsize='14')
plt.show()
def demo_Lissajous_figur(num=3, end=5):
"""李萨育图形:
由在相互垂直的方向上的两个频率成简朴整数比的简谐振动所合成的规矩的、稳定的闭合曲线
相成谐波频率关系的两个信号分别作为X和Y偏转信号送入示波器时,这两个信号分别在X轴、Y轴方向同时作用于电子束而描绘出稳定的图形"""
# figure(figsize=(20, 3.5), dpi=80)
for n in range(1, num):
zt = np.linspace(0, end, end*100)
xc = np.cos(2 * np.pi * zt) # cos(2πft)
ys = np.sin(2 * np.pi * n * zt) # sin(2nπft)
# subplot(1, 5, n)
# plot(xc, ys)
# plt.show()
    # Lissajous figure in 3D
fig = plt.figure()
ax1 = Axes3D(fig)
ax1.plot3D([0, end], [0, 0], [0, 0])
ax1.plot3D([end, end], [0, list(xc)[-1]], [0, list(ys)[-1]])
ax1.plot3D(zt, xc, ys)
plt.show()
def demo_cos_sin_function_composition():
    plt.rcParams['font.sans-serif'] = ['SimHei']  # needed to render CJK labels correctly
    plt.rcParams['axes.unicode_minus'] = False  # needed to render minus signs correctly
    # # one full period
# x=np.arange(0,2*np.pi,0.01)
# y=np.sin(x)
x = np.linspace(0, 10, 1000)
y1 = np.sin(x) + 1
y2 = np.cos(x ** 2) + 1
plt.figure(figsize=(8, 4))
plt.plot(x, y1, label=r'$\sin x+1$', color='red', linewidth=2)
plt.plot(x, y2, 'b--', label=r'$\cos x^2+1$', linewidth=1)
plt.xlabel('Time(s)')
plt.ylabel('Amplitude')
plt.title('A Sample Example')
plt.ylim(0, 2.2)
plt.xlim(0, 10)
plt.legend(loc='upper right')
plt.show()
def demo_sinc(points=100):
    plt.rcParams['font.sans-serif'] = ['SimHei']  # needed to render CJK labels correctly
    plt.rcParams['axes.unicode_minus'] = False  # needed to render minus signs correctly
x = np.linspace(-5 * np.pi, 5 * np.pi, points)
y = np.sinc(x/np.pi)
y0 = [0] * points
# plt.axis([np.min(x), np.max(x), 0, np.max(y)])
plt.plot(x, y, label="$ function $")
plt.plot(x, y0, 'b--', linewidth=1)
    # shade the integration region
plt.fill_between(x, y1=y, y2=0, where=(x >= 0) & (x <= 2 * np.pi), facecolor='blue', alpha=0.2)
plt.legend()
plt.show()
def demo_antenna_unit_pattern():
    # single-loop antenna: f(θ) = sin(π·d·cosθ/λ)
theta = np.arange(0, 2*np.pi, 0.02)
d_lambda1 = 0.5
d_lambda2 = 0.75
    ax = plt.subplot(121, polar=True)  # polar axes
    ax.set_thetagrids(np.arange(0.0, 360.0, 10.0))  # angular grid lines
    # ax.set_rgrids(np.arange(0.1, 1.6, 0.1), angle=45)  # radial grid lines
    ax.set_theta_zero_location('N')  # put 0° at the top; valid values: 'N','NW','W','SW','S','SE','E','NE'
    ax.set_theta_direction(-1)  # positive direction of the polar axis; -1 means clockwise
plt.plot(theta, np.abs(np.sin(d_lambda1 * np.pi * np.cos(theta))), color=[1, 0, 0], lw=1)
plt.plot(theta, np.abs(np.sin(d_lambda2 * np.pi * np.cos(theta))), '--', lw=1)
plt.title("d_lambda="+str(d_lambda1), fontsize=12)
# plt.savefig('d_lambda='+str(d_lambda)+'.png')
# plt.show()
    # composite-loop antenna: f(θ) = sqrt(cos(θ)**2 + 2*k*cos(φ0)*cos(θ) + k**2)
k = 0.7
phi0 = 0
    ax = plt.subplot(122, polar=True)  # polar axes
    ax.set_thetagrids(np.arange(0.0, 360.0, 10.0))  # angular grid lines
    # ax.set_rgrids(np.arange(0.2, 2, 0.2), angle=45)  # radial grid lines
    ax.set_theta_zero_location('N')  # put 0° at the top; valid values: 'N','NW','W','SW','S','SE','E','NE'
    ax.set_theta_direction(-1)  # positive direction of the polar axis; -1 means clockwise
plt.plot(theta, np.sqrt(np.square(np.cos(theta))+2*k*np.cos(phi0)*np.cos(theta)+np.square(k)), color=[1, 0, 0], lw=2)
plt.title("k="+str(k)+",phi0="+str(phi0), fontsize=12)
plt.savefig("k="+str(k)+" with phi0="+str(phi0)+'.png')
plt.show()
def demo_beamforming_patten(d_ant=57, freq=2600,
arr_a=[1, 1, 1, 1, 1, 1, 1, 1],
arr_p=[0, 45, 90, 135, 180, 225, 270, 315]):
"""
Returns beamforming result as power and phase.
:param d_ant: antenna unit interval as mm.
:param freq: Frequency as MHz
:param arr_a: weight list of amplitude
:param arr_p: weight list of phase
"""
arr_a = np.array(arr_a)
arr_p = np.array(arr_p)
arr_theta, arr_pwr = beamforming(d_ant, freq, arr_a, arr_p)
    ax = plt.subplot(111, polar=True)  # polar axes
    ax.set_thetagrids(np.arange(0.0, 360.0, 10.0))  # angular grid lines
import pickle
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
import cv2
import os
import numpy as np
import settings
import math
from settings import CALIB_FILE_NAME, PERSPECTIVE_FILE_NAME
def get_center_shift(coeffs, img_size, pixels_per_meter):
return np.polyval(coeffs, img_size[1]/pixels_per_meter[1]) - (img_size[0]//2)/pixels_per_meter[0]
def get_curvature(coeffs, img_size, pixels_per_meter):
return ((1 + (2*coeffs[0]*img_size[1]/pixels_per_meter[1] + coeffs[1])**2)**1.5) / np.absolute(2*coeffs[0])
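# The two helpers above evaluate a quadratic lane fit x(y) = a*y**2 + b*y + c in metric
# units; get_curvature() is the usual radius-of-curvature formula
# R = (1 + (2*a*y + b)**2)**1.5 / |2*a|, evaluated at the bottom of the image.
# A tiny numeric check with made-up calibration values (illustrative only):
def _example_curvature():
    coeffs = np.array([1e-4, 0.01, 1.85])       # gentle curve, coefficients in metres
    img_size = (1280, 720)                      # (width, height) in pixels
    pixels_per_meter = (20.0, 30.0)             # illustrative pixels-per-metre scales
    return get_curvature(coeffs, img_size, pixels_per_meter)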
#class that finds line in a mask
class LaneLineFinder:
def __init__(self, img_size, pixels_per_meter, center_shift):
self.found = False
self.poly_coeffs = np.zeros(3, dtype=np.float32)
self.coeff_history = np.zeros((3, 7), dtype=np.float32)
self.img_size = img_size
self.pixels_per_meter = pixels_per_meter
self.line_mask = np.ones((img_size[1], img_size[0]), dtype=np.uint8)
self.other_line_mask = np.zeros_like(self.line_mask)
self.line = np.zeros_like(self.line_mask)
self.num_lost = 0
self.still_to_find = 1
self.shift = center_shift
self.first = True
self.stddev = 0
def reset_lane_line(self):
self.found = False
self.poly_coeffs = np.zeros(3, dtype=np.float32)
self.line_mask[:] = 1
self.first = True
def one_lost(self):
self.still_to_find = 5
if self.found:
self.num_lost += 1
if self.num_lost >= 7:
self.reset_lane_line()
def one_found(self):
self.first = False
self.num_lost = 0
if not self.found:
self.still_to_find -= 1
if self.still_to_find <= 0:
self.found = True
def fit_lane_line(self, mask):
y_coord, x_coord = np.where(mask)
y_coord = y_coord.astype(np.float32)/self.pixels_per_meter[1]
x_coord = x_coord.astype(np.float32)/self.pixels_per_meter[0]
if len(y_coord) <= 150:
coeffs = np.array([0, 0, (self.img_size[0]//2)/self.pixels_per_meter[0] + self.shift], dtype=np.float32)
else:
coeffs, v = np.polyfit(y_coord, x_coord, 2, rcond=1e-16, cov=True)
self.stddev = 1 - math.exp(-5*np.sqrt(np.trace(v)))
self.coeff_history = np.roll(self.coeff_history, 1)
if self.first:
self.coeff_history = np.reshape(np.repeat(coeffs, 7), (3, 7))
else:
self.coeff_history[:, 0] = coeffs
value_x = get_center_shift(coeffs, self.img_size, self.pixels_per_meter)
curve = get_curvature(coeffs, self.img_size, self.pixels_per_meter)
print(value_x - self.shift)
if (self.stddev > 0.95) | (len(y_coord) < 150) | (math.fabs(value_x - self.shift) > math.fabs(0.5*self.shift)) \
| (curve < 30):
self.coeff_history[0:2, 0] = 0
self.coeff_history[2, 0] = (self.img_size[0]//2)/self.pixels_per_meter[0] + self.shift
self.one_lost()
print(self.stddev, len(y_coord), math.fabs(value_x-self.shift)-math.fabs(0.5*self.shift), curve)
else:
self.one_found()
self.poly_coeffs = np.mean(self.coeff_history, axis=1)
def get_line_points(self):
y = np.array(range(0, self.img_size[1]+1, 10), dtype=np.float32)/self.pixels_per_meter[1]
x = np.polyval(self.poly_coeffs, y)*self.pixels_per_meter[0]
y *= self.pixels_per_meter[1]
return np.array([x, y], dtype=np.int32).T
def get_other_line_points(self):
pts = self.get_line_points()
pts[:, 0] = pts[:, 0] - 2*self.shift*self.pixels_per_meter[0]
return pts
def find_lane_line(self, mask, reset=False):
n_segments = 16
window_width = 30
step = self.img_size[1]//n_segments
if reset or (not self.found and self.still_to_find == 5) or self.first:
self.line_mask[:] = 0
n_steps = 4
window_start = self.img_size[0]//2 + int(self.shift*self.pixels_per_meter[0]) - 3 * window_width
window_end = window_start + 6*window_width
sm = np.sum(mask[self.img_size[1]-4*step:self.img_size[1], window_start:window_end], axis=0)
sm = np.convolve(sm, np.ones((window_width,))/window_width, mode='same')
argmax = window_start + np.argmax(sm)
shift = 0
for last in range(self.img_size[1], 0, -step):
first_line = max(0, last - n_steps*step)
sm = np.sum(mask[first_line:last, :], axis=0)
sm = np.convolve(sm, np.ones((window_width,))/window_width, mode='same')
window_start = min(max(argmax + int(shift)-window_width//2, 0), self.img_size[0]-1)
window_end = min(max(argmax + int(shift) + window_width//2, 0+1), self.img_size[0])
                new_argmax = window_start + np.argmax(sm[window_start:window_end])
import pickle
import numpy
import torch
import torch.nn as nn
import gensim.downloader as api
from gensim.models import KeyedVectors
import modules.data_processors
from sklearn.decomposition import PCA
import os
embeddings = KeyedVectors.load_word2vec_format('C:/Users/fanny/Documents/Bachelor\'s Thesis/RUN/RUN-master/data/embeddings/GoogleNews-vectors-negative300.bin', binary=True)
#embeddings = api.load("word2vec-google-news-300")
#embeddings = embeddings.key_to_index
#embeddings = torch.FloatTensor(embeddings.vectors)
#embeddings.vectors = embeddings.vectors[:,0:100] # keeps just 1st 100 dims
#embeddings.vectors = embeddings.vectors
#embeddings.vector_size = 100
def embeddings_vec():
vec = []
#d = modules.data_processors.DataProcess(os.path.dirname(os.getcwd()) + "\data\\")
d = modules.data_processors.DataProcess()
for i in d.ind2word:
try:
vec.append(embeddings[d.ind2word[i]])
except KeyError:
#vec.append(torch.rand(100))
            vec.append(torch.normal(0, 1, size=(300,)))  # random 300-dim vector for out-of-vocabulary words
vec = torch.FloatTensor(vec)
# Start of PPA new code
#PCA to get Top Components
pca = PCA(n_components = 300)
vec = vec - vec.mean()
vec = pca.fit_transform(vec)
U1 = pca.components_
z = []
# Removing Projections on Top Components
for i, x in enumerate(vec):
for u in U1[0:3]:
x = x - numpy.dot(u.transpose(),x) * u
z.append(x)
z = numpy.asarray(z)
#End of ppa
#Start of PCA
s = PCA(n_components=100)
new_vec = z - z.mean()
new_vec = s.fit_transform(new_vec)
# PCA to get Top Components
pca = PCA(n_components=100)
vec = new_vec - new_vec.mean()
vec = pca.fit_transform(vec)
U1 = pca.components_
z = []
# Removing Projections on Top Components
for i, x in enumerate(vec):
for u in U1[0:3]:
x = x - numpy.dot(u.transpose(), x) * u
z.append(x)
z = numpy.asarray(z)
new_vec = torch.FloatTensor(z)
return new_vec
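# embeddings_vec() above follows a PPA -> PCA -> PPA post-processing recipe (remove the mean
# and the projections on the top principal components, reduce to 100 dims, repeat); cf.
# Mu & Viswanath's "all-but-the-top" post-processing and Raunak et al.'s dimensionality
# reduction for word embeddings. A typical way to consume the result (illustrative only):
def _example_embedding_layer():
    weights = embeddings_vec()                            # FloatTensor, (vocab_size, 100)
    return nn.Embedding.from_pretrained(weights, freeze=False)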
def sample_weights(nrow, ncol):
"""
This is from Bengio's 2010 paper
"""
    bound = numpy.sqrt(6.0 / (nrow + ncol))  # Glorot & Bengio (2010) uniform initialisation bound
from math import inf
from numpy.core.defchararray import decode
import torch
from torch import optim
import torch.nn.functional as F
from torch._C import DeviceObjType
import torch.nn as nn
from .attack import Attacker
from .utils import target_sentence_to_label, levenshteinDistance
from tqdm import tqdm
import numpy as np
from scipy.signal import butter, lfilter
def highpass_filter(data, cutoff=7000, fs=16000, order=10):
b, a = butter(order, cutoff / (0.5 * fs), btype='high', analog=False)
return lfilter(b, a, data)
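# Minimal sketch of highpass_filter() above: a 10th-order Butterworth high-pass at 7 kHz
# applied to one second of white noise sampled at 16 kHz (the same defaults the genetic
# attack below uses to shape its mutation noise):
def _example_highpass():
    noise = np.random.randn(16000)
    return highpass_filter(noise)  # low-frequency content attenuated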
class GeneticAttacker(Attacker):
def __init__(self, model, device, **kwargs):
super(GeneticAttacker, self).__init__(model, device)
self._parse_params(**kwargs)
def _parse_params(self, **kwargs):
self.pop_size = kwargs.get('pop_size', 100)
self.elite_size = kwargs.get('elite_size', 10)
self.mutation_p = kwargs.get('mutation_p', 0.005)
self.noise_stdev = kwargs.get('noise_stdev', 0.002) # \approx 40 / 16384
self.momentum = kwargs.get('momentum', 0.9)
self.alpha = kwargs.get('alpha', 0.001)
self.iterations = kwargs.get('iterations', 3000)
self.num_points_estimate = kwargs.get('num_points_estimate', 100)
self.delta_for_gradient = kwargs.get('delta_for_gradient', 0.006) # \approx 100 / 16384
self.delta_for_perturbation = kwargs.get('delta_for_perturbation', 0.06) # \approx 1000 / 16384
self.freq_disp = kwargs.get('freq_disp', 10)
self.decrease_factor = kwargs.get('decrease_factor', 0.995)
def get_fitness_score(self, input_audio_batch, targets, target_lengths):
input_audio_batch = torch.from_numpy(input_audio_batch).to(self.device).float()
out, output_sizes = self.model(input_audio_batch)
out = out.transpose(0, 1).log()
targets = targets.repeat((input_audio_batch.size(0), 1))
target_lengths = target_lengths.repeat((input_audio_batch.size(0), 1)).view(-1)
scores = F.ctc_loss(out, targets, output_sizes, target_lengths, reduction='none')
return -scores
def get_text(self, input_audio):
input_audio = torch.from_numpy(input_audio).to(self.device).float()
return self.model(input_audio, decode=True)
def get_new_pop(self, elite_pop, elite_pop_scores, pop_size):
elite_logits = np.exp(elite_pop_scores - elite_pop_scores.max())
elite_probs = elite_logits / elite_logits.sum()
cand1 = elite_pop[np.random.choice(elite_pop.shape[0], p=elite_probs, size=pop_size)]
cand2 = elite_pop[np.random.choice(elite_pop.shape[0], p=elite_probs, size=pop_size)]
mask = np.random.rand(pop_size, elite_pop.shape[1]) < 0.5
next_pop = mask * cand1 + (1 - mask) * cand2
return next_pop
def mutate_pop(self, pop):
noise = np.random.randn(*pop.shape) * self.noise_stdev
noise = highpass_filter(noise)
mask = np.random.randn(*pop.shape) < self.mutation_p
new_pop = pop + noise * mask
return new_pop
def generate(self, sounds, targets):
raw_targets = targets
sounds = sounds.cpu().numpy()
targets = target_sentence_to_label(targets)
targets = targets.view(1,-1).to(self.device).detach()
target_lengths = torch.IntTensor([targets.shape[1]]).view(1,-1)
pop = np.tile(sounds, (self.pop_size, 1))
prev_loss = None
dist = np.inf
mutation_p = self.mutation_p
with torch.no_grad():
for iter in tqdm(range(self.iterations)):
pop_scores = self.get_fitness_score(pop, targets, target_lengths).cpu().numpy()
elite_ind = np.argsort(pop_scores)[-self.elite_size:]
elite_pop, elite_pop_scores = pop[elite_ind], pop_scores[elite_ind]
if prev_loss is not None and prev_loss != elite_pop_scores[-1]:
mutation_p = self.momentum * mutation_p + self.alpha / np.abs(prev_loss - elite_pop_scores[-1])
if iter % self.freq_disp == 0 or iter == self.iterations - 1:
print('Current loss: ', -elite_pop_scores[-1])
best_pop = elite_pop[None, -1]
best_text = self.get_text(best_pop)[0][0]
dist = levenshteinDistance(best_text, raw_targets)
print('{}; {}; {}'.format(best_text, raw_targets, dist))
if best_text == raw_targets:
break
if dist > 2:
next_pop = self.get_new_pop(elite_pop, elite_pop_scores, self.pop_size)
pop = self.mutate_pop(next_pop)
prev_loss = elite_pop_scores[-1]
else:
perturbed = np.tile(elite_pop[None, -1], (self.num_points_estimate, 1))
indices = np.random.choice(pop.shape[1], self.num_points_estimate, replace=False)
perturbed[np.arange(self.num_points_estimate), indices] += self.delta_for_gradient
perturbed_scores = self.get_fitness_score(perturbed, targets, target_lengths).cpu().numpy()
grad = (perturbed_scores - elite_pop_scores[-1]) / self.delta_for_gradient
grad = grad / np.abs(grad).max()
modified = elite_pop[-1].copy()
modified[indices] += grad * self.delta_for_perturbation
                    pop = np.tile(modified[None, :], (self.pop_size, 1))
"""
Genereate ablated modality images. One time use code.
Modality ablation experiment. Generate and save the ablated brats images
Generate dataset with
Save in the directory: Path(brats_path).parent / "ablated_brats", and can be loaded with the script:
T1 = os.path.join(image_path_list[0], bratsID, bratsID+'_t1.nii.gz') # (240, 240, 155)
T1c = os.path.join(image_path_list[1], bratsID, bratsID+'_t1ce.nii.gz')
T2 = os.path.join(image_path_list[2], bratsID, bratsID+'_t2.nii.gz')
FLAIR = os.path.join(image_path_list[3], bratsID, bratsID+'_flair.nii.gz')
For the original brats image, ablate it by filling in the non-zero value with random values ~ N(image_mean, image_std)
"""
import nibabel
import os
from pathlib import Path
import numpy as np
import pandas as pd
import itertools, math
from monai.data import write_nifti
from monai.transforms import LoadNifti
import monai
# from .heatmap_utils import get_heatmaps
from .heatmap_utlis import *
from scipy import stats
from scipy.stats import spearmanr as spr
from scipy.stats import kendalltau
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, recall_score, precision_score, confusion_matrix
import csv
import itertools, math
import copy
from validate import test
from datetime import datetime
from skimage.morphology import binary_dilation  # needed by ablate_tumor_only / get_modality_feature_hm_value
# print(monai.__version__)
from sklearn.metrics import auc, roc_curve
def generate_ablated_dataset(modalities = ["t1", "t1ce", "t2", "flair"], ablation_mode = 'allzero'):
"""
    One-time function to generate and save the ablated modalities.
    :param ablation_mode: 'allzero' replaces the whole modality with zeros,
        'allnoise' with non-tumor noise, 'lesionzero' zeros out only the (dilated) lesion.
:return:
"""
data_root = "/local-scratch/authorid/dld_data/brats2020/MICCAI_BraTS2020_TrainingData/all_tmp"
data_root = Path(data_root)
if ablation_mode == 'allzero': # ablate the whole modality, and replace with 0s
saved_path = data_root.parent / "zero_ablated_brats"
elif ablation_mode == 'allnoise': # ablate the whole modality, and replace with nontumor signal noises
saved_path = data_root.parent / "ablated_brats"
elif ablation_mode == 'lesionzero': # ablate the lesion only on the modality, and replace with 0s
saved_path = data_root.parent / "lesionzero"
# read brain MRI
ids = [f for f in os.listdir(data_root) if os.path.isdir(os.path.join(data_root, f))]
for id in ids:
seg_path = data_root / id / "{}_seg.nii.gz".format(id)
seg = nibabel.load(seg_path).get_fdata()
for m in modalities:
path = data_root/id / "{}_{}.nii.gz".format(id, m)
# mri = nibabel.load(path)
# img_data = mri.get_fdata()
loader = LoadNifti(image_only = False)
img_data, header = loader(path)
if ablation_mode == "allzero":
ablate_array = np.zeros(img_data.shape)
elif ablation_mode == 'allnoise':
ablate_array = ablate_signal(img_data, seg)
elif ablation_mode == 'lesionzero':
ablate_array = ablate_tumor_only(img_data, seg)
# nibabel.save(ablate_array, "{}_{}.nii.gz".format(id, m))
output_root = saved_path/id
output_root.mkdir(exist_ok=True, parents=True)
print(header['affine'], header['original_affine'])
write_nifti(ablate_array,
affine= header['affine'],
target_affine = header['original_affine'],
file_name = output_root/"{}_{}.nii.gz".format(id, m))
# saver = NiftiSaver(data_root_dir = output_root, output_postfix = None, output_ext='.nii.gz')
# saver.save(ablate_array, {'filename_or_obj': "{}_{}".format(id, m)})
def ablate_tumor_only(array, seg):
edge = 10
dilated_seg = []
for s in range(array.shape[-1]):
dilated= binary_dilation(seg[:,:,s], selem = np.ones([edge for i in range(seg[:,:,s].ndim)]))
dilated_seg.append(dilated)
dilated_seg = np.stack(dilated_seg, axis=-1)
ablated_array = np.copy(array)
ablated_array[dilated_seg > 0] = 0
return ablated_array
def ablate_signal(array, seg):
"""Helper: given a image array, replace the non-zero value by sampling from the rest non-tumor regions (with replacement, so
that to keep the same distribution)
"""
non_tumor = array[(array != 0) & (seg != 1) & (seg != 2) & (seg != 4)].flatten() # get brain region with non-tumor part [0. 1. 2. 4.]
print(np.unique(seg))
print(non_tumor.shape)
# mean = np.mean(array)
# std = np.std(array)
ablated_array = np.random.choice(non_tumor, size=array.shape, replace=True)
ablated_array[array == 0] = 0
print('ablated_array', ablated_array.shape)
return ablated_array
### Utlities to get gt modality shapley value, and compare hm value with this gt ###
def modality_shapley(config, ablated_image_folder, csv_save_dir = "/local-scratch/authorid/BRATS_IDH/log/mod_shapley"):
"""
Modality ablation experiment. Generate and save the ablated brats images
Generate dataset with
Save in the directory: Path(brats_path).parent / "ablated_brats", and can be loaded with the script:
T1 = os.path.join(image_path_list[0], bratsID, bratsID+'_t1.nii.gz') # (240, 240, 155)
T1c = os.path.join(image_path_list[1], bratsID, bratsID+'_t1ce.nii.gz')
T2 = os.path.join(image_path_list[2], bratsID, bratsID+'_t2.nii.gz')
FLAIR = os.path.join(image_path_list[3], bratsID, bratsID+'_flair.nii.gz')
For the original brats image, ablate it by filling in the non-zero value with random values ~ N(image_mean, image_std)
"""
modalities = config['xai']['modality']
print(modalities)
# generate modality combinations
N_sets = list(itertools.product([0, 1], repeat=len(modalities)) ) # set of all_combinations
for modality_selection in N_sets:
test(config, timestamp = False, ablated_image_folder = ablated_image_folder, csv_save_dir = csv_save_dir, modality_selection= modality_selection)
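# For reference, the grid of modality on/off configurations built above enumerates every
# subset of the modalities; with the default four it is simply
#   list(itertools.product([0, 1], repeat=4))   # 16 tuples, from (0, 0, 0, 0) to (1, 1, 1, 1)
# so test() is evaluated once per subset of {t1, t1ce, t2, flair}.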
def shapley_result_csv(fold = 1, root = '/local-scratch/authorid/BRATS_IDH/log/mod_shapley/test/', modalities= ["t1", "t1ce", "t2", "flair"], metric = 'acc'):
"""
From the individual test records, get the summarized csv of modality: accuracy pair.
:param fold:
:param path:
:return:
"""
# get all csvs in the folder
save_path = Path(root)/"shapley"
save_path.mkdir(parents = True, exist_ok= True)
csv_filename = save_path / 'aggregated_performance_fold_{}.csv'.format(fold)
file_exists = os.path.isfile(csv_filename)
fnames = modalities+["accuracy"]
with open(csv_filename, 'w', newline='') as csv_file:
csv_writer = csv.DictWriter(csv_file, fieldnames=fnames)
# if not file_exists:
csv_writer.writeheader()
for f in Path(root).rglob('*cv_result_fold*.csv'):
fn = f.name.split(".")[0].split("-")
if len(fn) == len(modalities)+1:
fold_num = fn[0].split("_")[-1]
else:
fold_num = fn[1].split("_")[-1]
fold_num = int(fold_num)
if fold_num == fold:
if len(fn) == len(modalities) + 1:
modelity_selection = [int(i) for i in fn[1:]]
else:
modelity_selection = [int(i) for i in fn[2:]]
# print( fold_num, modelity_selection)
results = pd.read_csv(f)
gt = results['gt']
pred = results['pred']
if metric == 'auc':
fpr, tpr, threshold = roc_curve(results['gt'].to_list(), results['pred'].to_list())
accuracy = auc(fpr, tpr)
else:
accuracy = accuracy_score(gt, pred)
csv_record = {'accuracy': accuracy}
for i, m in enumerate(modalities):
csv_record[m]= modelity_selection[i]
csv_writer.writerow(csv_record)
print("Fold {}: modality: {}, accuracy: {}".format(fold, modelity_selection, accuracy))
print("Saved at {}".format(csv_filename))
return csv_filename
def get_shapley(csv_filename, modalities = ["t1", "t1ce", "t2", "flair"]):
"""
    Calculate modality Shapley values.
    CSV with columns: t1, t1ce, t2, flair (0/1 indicators) and a performance (accuracy) value.
:param csv:
:return:
"""
# convert csv to dict: {(0, 0, 1, 0): 10} {tuple: performance}
df = pd.read_csv(csv_filename)
fold = Path(csv_filename).name.split('.')[0].split('_')[-1]
# print(fold)
df_dict = df.to_dict(orient='records')
# print(df_dict)
v_dict = {} #
for row in df_dict:
mod_lst = []
for m in modalities:
mod_lst.append(row[m])
v_dict[tuple(mod_lst)] = row['accuracy']
# print(v_dict)
n = len(modalities)
# sanity check if all mod combinations are exists
N_sets = list(itertools.product([0,1],repeat = len(modalities))) # set of all_combinations
for s in N_sets:
if tuple(s) not in v_dict:
print("ERROR in get_shapley! {} missing".format(s))
N_sets_array = np.array(N_sets) # array([[0, 0, 0, 0], [0, 0, 0, 1],
mod_shapley = {}
# for each mod, calculate its shapley value:
for i, mod in enumerate(modalities):
# get combination not including mod
        n_not_i = N_sets_array[N_sets_array[:, i]==0]  # all subsets that do not contain modality i
# print(n_not_i, i)
phi_i= 0
for s in n_not_i:
# print('s', s)
v_s = v_dict[tuple(s)]
sANDi = copy.deepcopy(s)
sANDi[i] =1
v_sANDi = v_dict[tuple(sANDi)]
# print(s , s.sum(), i, mod)
phi_i += (v_sANDi - v_s) * math.factorial(s.sum()) * (math.factorial(n - s.sum() - 1)) / math.factorial(n)
mod_shapley[mod] = phi_i
mod_shapley['fold'] = fold
print(mod_shapley)
# save gt shapley to csv
with open(Path(csv_filename).parent/'fold_{}_modality_shapley.csv'.format(fold), 'w') as f:
csv_writer = csv.DictWriter(f, fieldnames=list(mod_shapley.keys()))
csv_writer.writeheader()
csv_writer.writerow(mod_shapley)
# for key in mod_shapley.keys():
# f.write("%s,%s\n" % (key, mod_shapley[key]))
return mod_shapley
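# Worked toy check of the computation above with two "modalities" A and B and made-up
# accuracies v(empty)=0.5, v({A})=0.7, v({B})=0.6, v({A,B})=0.9:
#   phi_A = 0.5*(0.7-0.5) + 0.5*(0.9-0.6) = 0.25
#   phi_B = 0.5*(0.6-0.5) + 0.5*(0.9-0.7) = 0.15
# and phi_A + phi_B = 0.40 = v({A,B}) - v(empty), as the Shapley efficiency property requires.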
def get_shapley_gt_multiple_runs_pipeline(config, run_num, ablated_image_folder, csv_save_dir):
"""Since the shapley value gt is not deterministic, run multiple run_num to get the distribution of gt modality shapley value."""
modalities = config['xai']['modality']
fold = config['data_loader']['args']['fold']
# support multiple runtime, check if file exists
existing_runs = [f for f in os.listdir(csv_save_dir) if os.path.isdir(os.path.join(csv_save_dir, f))]
existing_runs.sort()
starting_run = -1
for i in existing_runs:
i = int(i)
shapley_csv = os.path.join(csv_save_dir, "{}".format(i), 'shapley', 'fold_{}_modality_shapley.csv'.format(fold))
file_exists = os.path.isfile(shapley_csv)
if file_exists:
starting_run = i
else:
break
if starting_run >= run_num:
return
for run_i in range(starting_run+1, run_num):
run_dir = os.path.join(csv_save_dir, "{}".format(run_i))
modality_shapley(config, ablated_image_folder = ablated_image_folder, csv_save_dir= run_dir)
csv_filename = shapley_result_csv(fold = fold, modalities=modalities, root = run_dir)
print(csv_filename)
get_shapley(csv_filename, modalities=modalities)
def aggregate_shapley_gt_mean_std(fold, csv_save_dir, modalities):
# calculate the mean and std of the multiple run shapley
result_list = []
runs = [f for f in os.listdir(csv_save_dir) if os.path.isdir(os.path.join(csv_save_dir, f))]
for run_i in runs:
shapley_csv = os.path.join(csv_save_dir, "{}".format(run_i), 'shapley', 'fold_{}_modality_shapley.csv'.format(fold))
file_exists = os.path.isfile(shapley_csv)
if file_exists:
df = pd.read_csv(shapley_csv)
df = df.iloc[0]#.to_dict('list')
# print(df)
gt_shapley = [df[m] for m in modalities]
result_list.append(gt_shapley)
result_array = np.array(result_list)
shapley_mean = result_array.mean(axis = 0)
shapley_std = result_array.std(axis = 0)
print(result_array)
print("Shapley mean: {}, std {}".format(shapley_mean, shapley_std))
# save the mean and std as two csv files
mean_shapley, std_shapley = {}, {}
mean_shapley['fold'], std_shapley['fold'] = fold, fold
# for each mod, calculate its shapley value:
for i, mod in enumerate(modalities):
mean_shapley[mod] = shapley_mean[i]
std_shapley[mod] = shapley_std[i]
with open(os.path.join(csv_save_dir, 'multirun_gt_shapley_fold_{}.csv'.format(fold)), 'w') as f:
csv_writer = csv.DictWriter(f, fieldnames=list(mean_shapley.keys()))
csv_writer.writeheader()
csv_writer.writerow(mean_shapley)
with open(os.path.join(csv_save_dir, 'multirun_gt_shapleySTD_fold_{}.csv'.format(fold)), 'w') as f:
csv_writer = csv.DictWriter(f, fieldnames=list(std_shapley.keys()))
csv_writer.writeheader()
csv_writer.writerow(std_shapley)
def get_modality_feature_hm_value(post_hm, seg, penalize = True, portion = True):
"""
Get positive+negative values inside tumor regions, minus positive values outside tumor regions
(penalty for positive values outside tumor)
    :param post_hm: np array of the same shape as seg
    :param penalize: legacy parameter; no longer needed now that `portion` exists
:return:
"""
assert seg.shape == post_hm.shape[1:], "segmentation map shape {} and processed hm shape {} does not match!".format(seg.shape, post_hm.shape[1:])
# binary_seg = seg[seg>0]
edge = 20
dilated_seg = []
for s in range(seg.shape[-1]):
dilated= binary_dilation(seg[:,:,s], selem = np.ones([edge for i in range(seg[:,:,s].ndim)]))
dilated_seg.append(dilated)
dilated_seg = np.stack(dilated_seg, axis = -1)
print((seg>0).sum()/seg.size, (dilated_seg>0).sum()/dilated_seg.size, dilated_seg.shape)
hm_values = []
for hm in post_hm:
feature = hm[(dilated_seg>0) & (hm>0)]
non_feature = hm[(dilated_seg==0) & (hm>0)]
if portion:
v = feature.sum() / ( feature.sum() + non_feature.sum() )
if (v < 0):
print( feature.sum() , feature.shape, non_feature.shape,non_feature.sum())
else:
v = feature.sum()
if penalize:
v -= non_feature.sum()
hm_values.append(v)
print(hm_values, np.sum(post_hm, axis = tuple([i for i in range(4)][1:])), '\n')
return hm_values
def get_save_modality_hm_value(hm_save_dir, result_save_dir, fold, method_list, penalize= False, portion_metrics= True, positiveHMonly = True, segment_path = None, modalities= ["t1", "t1ce", "t2", "flair"]):
'''
Since read hm is time consuming, read and save the hm values for each method
:param hm_save_dir:
:param method_list:
:param shapley_csv:
:param localize_feature: if True, calculate the sum of hm values using lesion masks.
Get positive+negative values inside tumor regions, minus positive values outside tumor regions
(penalty for positive values outside tumor)
:return:
'''
Path(result_save_dir).mkdir(parents=True, exist_ok=True)
columns = modalities+ ['XAI', 'dataID']
for method in method_list:
result_csv = Path(result_save_dir) / 'modalityHM_fold-{}-{}.csv'.format(fold, method)
file_exists = os.path.isfile(result_csv)
if file_exists:
print("{} exists, pass".format(method))
continue
result_df = pd.DataFrame(columns=columns)
value = {}
# post-process hms
# print(method)
hm_dict, data_record = get_heatmaps(hm_save_dir, method, by_data=False, hm_as_array=False, return_mri=False)
print("Number of data for {}: {}".format(method, len(hm_dict.keys()))) # , hm_dict.keys())
for dataID, hm in hm_dict.items():
print(hm.min(), hm.max(), dataID)
post_hm = postprocess_heatmaps(hm, no_neg=positiveHMonly) # (C, H,W,D) # the postprocessed hm is already non-negative
if segment_path:
seg_path = os.path.join(segment_path, dataID, dataID + '_seg.nii.gz')
seg = nibabel.load(seg_path).get_fdata()
seg = np.rot90(seg, k=3, axes=(0, 1)) # important, avoid bug of seg, saliency map mismatch
hm_values = get_modality_feature_hm_value(post_hm, seg, penalize=penalize, portion = portion_metrics)
else:
if positiveHMonly:
positive_hm = np.copy(post_hm)
positive_hm[positive_hm <0] =0
hm_values = np.sum(positive_hm, axis = tuple([i for i in range(len(modalities))][1:]))
else:
hm_values = np.sum(post_hm, axis = tuple([i for i in range(len(modalities))][1:]))
# print(method, dataID, corr, p_value)
value["XAI"] = method
value['dataID'] = dataID
for i, mod in enumerate(modalities):
value[mod] = hm_values[i]
result_series= pd.Series(value, index=columns)
result_df= result_df.append(result_series, ignore_index=True)
# print(result_df)
# result_df = pd.DataFrame.from_dict(result, orient = 'index')
result_df.to_csv(result_csv)
print("modalityHM Saved at: {}".format(result_csv))
return result_csv
# def corr_modality_shapley(hm_save_dir, method_list, shapley_csv, modalities= ["t1", "t1ce", "t2", "flair"]):
# ''''''
# fold = Path(shapley_csv).name.split('.')[0].split('_')[1] #fold_{}_modality_shapley.csv'
# df = pd.read_csv(shapley_csv)
# # print(df)
# df = df.iloc[0]#.to_dict('list')
# # print(df)
# gt_shapley = [df[m] for m in modalities]
# # print(gt_shapley)
# columns = modalities+ ['XAI', 'correlation', 'p_value', 'dataID']
#
# for method in method_list:
# result_csv = Path(shapley_csv).parent / 'CorrModalityShapley_fold-{}-{}.csv'.format(fold, method)
# file_exists = os.path.isfile(result_csv)
# if file_exists:
# print("{} exists, pass".format(file_exists))
# continue
# result_df = pd.DataFrame(columns=columns)
# correlations = {}
# gt_results = list()
# # post-process hms
# hm_dict, data_record = get_heatmaps(hm_save_dir, method, by_data=False, hm_as_array=False, return_mri=False)
# print("Number of data to be evaluated for {}: {}".format(method, len(hm_dict.keys()))) # , hm_dict.keys())
# for dataID, hm in hm_dict.items():
# post_hm = postprocess_heatmaps(hm) # (C, H,W,D)
# hm_values = np.sum(post_hm, axis = tuple([i for i in range(len(modalities))][1:]))
# corr, p_value = spr(gt_shapley, hm_values)
# # print(method, dataID, corr, p_value)
# correlations["XAI"] = method
# correlations["correlation"] = corr
# correlations["p_value"] = p_value
# correlations['dataID'] = dataID
# for i, mod in enumerate(modalities):
# correlations[mod] = hm_values[i]
# result_series= pd.Series(correlations, index=columns)
# result_df= result_df.append(result_series, ignore_index=True)
# print(result_df)
# # result_df = pd.DataFrame.from_dict(result, orient = 'index')
# result_df.to_csv(result_csv)
# print("corr_modality_shapley Saved at: {}".format(result_csv))
# return result_csv
def compute_xai_mod_shapley_corr(hm_result_csv_root, gt_csv_path, modalities= ["t1", "t1ce", "t2", "flair"], corr_name = "pearson"):
fold_dict = {}
if corr_name == 'pearson':
corr_method = stats.pearsonr
elif corr_name == 'spr':
corr_method = spr
elif corr_name == 'kendalltau':
corr_method = kendalltau
# get all hm value csv files for each
for f in Path(hm_result_csv_root).rglob('modalityHM_fold*.csv'):
fold = f.name.split('.')[0].split("-")[1]
if fold in fold_dict:
fold_dict[fold].append(f)
else:
fold_dict[fold] = [f]
columns = ['XAI', 'fold', 'corr','p_value', 'data_wise_corr', 'data_wise_std' ] + modalities
result_df = pd.DataFrame(columns=columns)
for fold, files in fold_dict.items():
# get mod shapley gt
shapley_csv = os.path.join(gt_csv_path, 'multirun_gt_shapley_fold_{}.csv'.format(fold))
if not os.path.isfile(shapley_csv):
if gt_csv_path[-2:] =='mi':
shapley_csv = Path(gt_csv_path) / 'seed_{}'.format(fold) /'shapley' / 'fold_{}_modality_shapley.csv'.format(fold)
else:
shapley_csv = Path(gt_csv_path) / 'shapley' / 'fold_{}_modality_shapley.csv'.format(fold)
# elif not os.path.isfile(shapley_csv):
# shapley_csv = Path(gt_csv_path) / 'shapley' / 'fold_{}_modality_shapley.csv'.format(fold)
# print("shapley_csv", shapley_csv)
df = pd.read_csv(shapley_csv)
df = df.iloc[0]#.to_dict('list')
# print(df)
gt_shapley = [df[m] for m in modalities]
print(fold, gt_shapley)
for fl in files:
method = fl.name.split('.')[0].split('-')[-1]
hm_df = pd.read_csv(fl)
# print("file", fl, hm_df )
result = {}
# print( hm_df.mean(axis=0))
result['XAI'] = hm_df['XAI'].unique()[0]
result['fold'] = fold
hm_mean = hm_df.mean(axis=0)
hm_value_dataset = [hm_mean[m] for m in modalities]
hm_df['XAI'] = method
hm_df['Fold'] = fold
for i,m in enumerate(modalities):
result[m] = hm_mean[m]
# print(hm_value_dataset)
result["corr"], result["p_value"] = corr_method(gt_shapley, hm_value_dataset)
hm_df['kendalltau'] = hm_df.apply(lambda row: corr_method(gt_shapley, row[modalities]).correlation, axis=1)
hm_df['pvalue'] = hm_df.apply(lambda row: corr_method(gt_shapley, row[modalities]).pvalue, axis=1)
correlation = list(hm_df['kendalltau'])
# hm_df['kendalltau'] = 0
# hm_df['pvalue'] = 0
# for index, row in hm_df.iterrows():
# corr, p_value = corr_method(gt_shapley, row[modalities])
# correlation.append(corr)
# hm_df['kendalltau'] = corr
# hm_df['pvalue'] = p_value
kandall_dir = fl.parent.parent/ 'kendalltau'
kandall_dir.mkdir(parents=True, exist_ok=True)
hm_df.to_csv(os.path.join(kandall_dir, fl.name))
            data_wise_corr = np.array(correlation)
from keras.layers import Input
from keras.models import Model, load_model
from keras.layers import Convolution2D
import tensorflow as tf
import cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from glob import glob
from keras import backend as K
custom_objects={"GlorotUniform": tf.keras.initializers.glorot_uniform}
model = load_model("d2/models/mod_lin_1.h5", custom_objects)
img_in = Input(shape=(120, 160, 3), name='img_in')
x = img_in
x = Convolution2D(24, (5,5), strides=(2,2), activation='relu', name='conv2d_1')(x)
x = Convolution2D(32, (5,5), strides=(2,2), activation='relu', name='conv2d_2')(x)
x = Convolution2D(64, (5,5), strides=(2,2), activation='relu', name='conv2d_3')(x)
x = Convolution2D(64, (3,3), strides=(2,2), activation='relu', name='conv2d_4')(x)
conv_5 = Convolution2D(64, (3,3), strides=(1,1), activation='relu', name='conv2d_5')(x)
convolution_part = Model(inputs=[img_in], outputs=[conv_5])
for layer_num in ('1', '2', '3', '4', '5'):
convolution_part.get_layer('conv2d_' + layer_num).set_weights(model.get_layer('conv2d_' + layer_num).get_weights())
inp = convolution_part.input # input placeholder
outputs = [layer.output for layer in convolution_part.layers][1:] # all layer outputs
functor = K.function([inp], outputs)
kernel_3x3 = tf.constant(np.array([
[[[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]]]
]), tf.float32)
kernel_5x5 = tf.constant(np.array([
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]]
]), tf.float32)
layers_kernels = {4:kernel_3x3, 3: kernel_3x3, 2: kernel_5x5, 1: kernel_5x5, 0: kernel_5x5}
layers_strides = {4: [1, 1, 1, 1], 3: [1, 2, 2, 1], 2: [1, 2, 2, 1], 1: [1, 2, 2, 1], 0: [1, 2, 2, 1]}
def compute_visualisation_mask(img):
activations = functor([np.array([img])])
upscaled_activation = np.ones((3, 6))
for layer in [4, 3, 2, 1, 0]:
averaged_activation = np.mean(activations[layer], axis=3).squeeze(axis=0) * upscaled_activation
if layer > 0:
output_shape = (activations[layer - 1].shape[1], activations[layer - 1].shape[2])
else:
output_shape = (120, 160)
x = tf.constant(
            np.reshape(averaged_activation, (1, averaged_activation.shape[0], averaged_activation.shape[1], 1))
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 4 15:51:00 2022
@author: alexany
"""
import time
import sys
sys.path.insert(0,"d:\\Users\\alexany\\CNN_AF\\23_02_2022")
import numpy as np
import bioformats as bf
#import javabridge
import os
import cv2
from scipy.signal import savgol_filter
import tifffile
import datetime
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import json
#import random
import master_subFunctions as mas
from tensorflow.keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D, Flatten, BatchNormalization,Activation,Dense
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow as tf
from mobilenetv3_small import MobileNetV3
#---------------------------------------------------------------------------
class CNN_AF(object):
settings_filename = 'CNN_AF_settings.json'
image_filename = 'MMStack_Pos1.ome.tif'
stack_dir_prefix = 'stack'
data_folder = None
zStep = 0.5 # in um
n_frames = None
image_size = 96
proj_shape = None
n_stacks = None
inp_shape = None
pList = [] # list of stack file names
#---------------------------------------------------------------------------
def set_data_info(self,inp_dir_list):
self.pList = []
for f in inp_dir_list:
if os.path.isdir(f):
files = os.listdir(f)
for i in range(len(files)):
if os.path.isdir(f+files[i]):
if self.stack_dir_prefix in files[i]:
fullfilename = f + files[i] + os.path.sep + self.image_filename
if os.path.isfile(fullfilename):
self.pList.append(fullfilename)
try:
self.n_stacks = len(self.pList)
imgs = tifffile.imread(self.pList[0])
self.inp_shape = (imgs.shape[1],imgs.shape[2])
self.n_frames = imgs.shape[0]
#
# pT, pX, pY = getImageInfo(self.pList[0])
# self.inp_shape = (pX,pY)
# self.n_frames = pT
#
self.proj_shape = (max(self.inp_shape[0],self.inp_shape[1]),2)
#
# for var in vars(self):
# print(getattr(self,var))
except:
functionNameAsString = sys._getframe().f_code.co_name
print(functionNameAsString + ' error!')
return False
return True
#---------------------------------------------------------------------------
def save_settings(self,where_to_save_folder):
with open( where_to_save_folder + self.settings_filename, "w" ) as f:
json.dump( self.__dict__, f )
#---------------------------------------------------------------------------
def load_settings(self,fullfilename):
with open(fullfilename) as f:
self.__dict__ = json.load(f)
#---------------------------------------------------------------------------
def get_stack_range_indices(self,k):
beg_k = k*self.n_frames
end_k = (k+1)*self.n_frames
return beg_k, end_k
#---------------------------------------------------------------------------
class stack_data_generator(CNN_AF):
#---------------------------------------------------------------------------
def set_output_folder_in(self,dst_dir):
self.data_folder = createFolder(dst_dir,timestamp())
return self.data_folder
#---------------------------------------------------------------------------
def gen_XY(self,index):
X_image, X_proj, Y = 0,0,0
#
print(self.pList[index])
#
stack = self.load_stack(index)
X_image = self.gen_X_image(stack)
X_proj = self.gen_X_proj(stack)
Y = self.gen_Y(X_image,index)
return X_image, X_proj, Y
#---------------------------------------------------------------------------
def gen_XY_in_range(self,n1,n2):
#
self.save_settings(self.data_folder)
#
for i in range(n1,n2):
X_image, X_proj, Y = self.gen_XY(i)
np.save(self.data_folder + os.path.sep + 'X_image' + '_' + str(i), X_image)
np.save(self.data_folder + os.path.sep + 'X_proj' + '_' + str(i), X_proj)
np.save(self.data_folder + os.path.sep + 'Y' + '_' + str(i), Y)
#---------------------------------------------------------------------------
def load_stack(self,index):
return tifffile.imread(self.pList[index])
#---------------------------------------------------------------------------
def gen_X_image(self,stack):
X_image = np.zeros((int(self.n_frames),int(self.image_size), int(self.image_size)))
for i in range(0, self.n_frames):
u = stack[i,:,:]
#
# modified normalization - YA 25.11.2021
# u = normalize_by_percentile(u,.1,99.9)
# u = np.clip(u,0,1)
#
# original normalization
u = u/np.linalg.norm(u)
#
u = cv2.resize(u, dsize=(int(self.image_size), int(self.image_size)), interpolation=cv2.INTER_CUBIC)
X_image[i,:,:] = u
print(i,' @ ',self.n_frames)
return X_image
#---------------------------------------------------------------------------
def gen_X_proj(self,stack):
proj_len = max(self.inp_shape[0],self.inp_shape[1])
#
X_proj = np.zeros((int(self.n_frames),int(proj_len),int(2)))
#
for i in range(0, self.n_frames):
u = stack[i,:,:]
#
# modified normalization - YA 25.11.2021
u = normalize_by_percentile(u,.1,99.9)
u = np.clip(u,0,1)
#
# original normalization
# u = u/np.linalg.norm(u)
#
up1 = cv2.resize(u, dsize=(int(proj_len), int(proj_len)), interpolation=cv2.INTER_CUBIC)
X_proj[i,:,0] = np.mean(up1,axis = 0)
X_proj[i,:,1] = np.mean(up1,axis = 1)
#
print(i,' @ ',self.n_frames)
return X_proj
#---------------------------------------------------------------------------
def gen_Y(self,stack,index):
#
xSum1 = []
x = stack
numSlice = self.n_frames
#
cgx, xSum1 = center(x, xSum1, numSlice)
yS = 0-int(cgx)
yE = numSlice-int(cgx)
Y = np.arange(yS, yE, 1)
#print('cgx', cgx, 'ys', yS,'yE', yE,'y1',y1)
#print('y0',type(y),y)
#print('new_y', (list((np.asarray(y)-10)/25)))
#y = ((np.asarray(y)-10.0)/25.0).tolist()
#print('y1',type(y), y)
#plt.pause(0.05)
#plt.plot(Y,xSum1)
#plt.title(index)
half_range = (numSlice-1)/2.0
Y = ((np.asarray(Y)-0.0)/half_range).tolist()
return Y
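    # --- Added usage sketch (not part of the original file) -----------------
    # Typical driver for this class; the paths are placeholders and the helper
    # functions createFolder/timestamp are assumed to come from
    # master_subFunctions:
    #
    #     gen = stack_data_generator()
    #     gen.set_output_folder_in('d:/CNN_AF/training_data/')
    #     gen.set_data_info(['d:/CNN_AF/raw_stacks/'])
    #     gen.gen_XY_in_range(0, gen.n_stacks)  # writes X_image_*, X_proj_*, Y_* .npy files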
#---------------------------------------------------------------------------
class stack_data_trainer(CNN_AF):
mode = None
MobileNet = None
#
batch_size = 128
epos = 500
#
results_folder = None
train_X, valid_X, train_Y, valid_Y = None, None, None, None
MobileNetV3_num_classes = int(1280)
MobileNetV3_width_multiplier = 1.0
MobileNetV3_l2_reg = 1e-5
#---------------------------------------------------------------------------
def __init__(self,mode=None,MobileNet=None):
assert(mode in ['proj','image'])
assert(MobileNet in ['V2','V3'])
stack_data_trainer.mode = mode
stack_data_trainer.MobileNet = MobileNet
#---------------------------------------------------------------------------
def set_data_folder(self,data_folder):
self.data_folder = data_folder
try:
self.load_settings(self.data_folder + super().settings_filename)
except:
print('error!')
#---------------------------------------------------------------------------
def getXY(self,validation_fraction,validation_only=False):
n_val = int(np.fix(validation_fraction*self.n_stacks))
self.valid_X, self.valid_Y = self.get_xy(0,n_val)
if validation_only: return
self.train_X, self.train_Y = self.get_xy(n_val,self.n_stacks)
#---------------------------------------------------------------------------
def get_xy(self,n1,n2):
n = n2 - n1
sT = int(self.n_frames*n)
prefix = None
if self.mode=='proj':
sX,sY = int(self.proj_shape[0]), int(self.proj_shape[1])
prefix = 'X_proj_'
if self.mode=='image':
sX,sY = int(self.image_size), int(self.image_size)
prefix = 'X_image_'
out_X = np.zeros((sT,sX,sY))
out_Y = np.zeros((sT))
for k in range(n1,n2):
fname = self.data_folder + prefix + str(k) + '.npy'
stack_k = np.load(fname)
beg_k,end_k = self.get_stack_range_indices(k-n1)
out_X[beg_k:end_k,:,:] = stack_k
#
fname = self.data_folder + 'Y_' + str(k) + '.npy'
out_Y[beg_k:end_k] = np.load(fname)
#
print(k-n1,' @ ',n)
return out_X, out_Y
#---------------------------------------------------------------------------
def train(self,validation_fraction):
self.results_folder = createFolder(self.data_folder,'results_' +
self.mode + '_' +
self.MobileNet + '_' + timestamp())
self.save_settings(self.results_folder)
self.getXY(validation_fraction)
#
if self.mode=='proj':
if self.MobileNet=='V2':
self.train_proj_V2()
if self.MobileNet=='V3':
self.train_proj_V3()
if self.mode=='image':
if self.MobileNet=='V2':
self.train_image_V2()
if self.MobileNet=='V3':
self.train_image_V3()
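    # --- Added usage sketch (not part of the original file) -----------------
    # Minimal way to launch training on previously generated data; the folder
    # path is a placeholder and validation_fraction=0.2 holds out 20% of the
    # stacks for validation:
    #
    #     trainer = stack_data_trainer(mode='image', MobileNet='V2')
    #     trainer.set_data_folder('d:/CNN_AF/training_data/')
    #     trainer.train(validation_fraction=0.2)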
#---------------------------------------------------------------------------
def train_proj_V2(self):
train_X = transform_stack_to_stack_of_square_images(self.train_X)
valid_X = transform_stack_to_stack_of_square_images(self.valid_X)
s = train_X.shape
sV = valid_X.shape
x_train = np.zeros([s[0], s[1], s[2], 1])
x_val = np.zeros([sV[0], sV[1], sV[2], 1])
x_train[:,:,:,0] = train_X
x_val[:,:,:,0] = valid_X
x_train_tensor = tf.convert_to_tensor(x_train)
x_val_tensor = tf.convert_to_tensor(x_val)
#imports the MobileNetV2 model and discards the last 1000 neuron layer.
base_model=tf.keras.applications.MobileNetV2(weights=None,
include_top=False,
input_shape=(s[1],s[2],1))
x = base_model.output
x = GlobalAveragePooling2D()(x)
preds = Dense(1)(x)
model = tf.keras.Model(inputs=base_model.input,outputs=preds)
model.compile(optimizer = 'adam',
#loss = 'sparse_categorical_crossentropy',
loss = 'mse',
metrics = ['mse'])
#saveWeights(model, res_path)
save_model_summary(self.results_folder, model)
#best epoch callback
filepath = self.results_folder+"weights_best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_mse', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
stepsPerEpoch = int(s[0]/self.batch_size)
history = model.fit(x_train_tensor, # Features
self.train_Y, # Target vector
epochs=self.epos,#, # Number of epochs
validation_data=(x_val_tensor, self.valid_Y),
steps_per_epoch = stepsPerEpoch,
verbose=1, # Print description after each epoch
#batch_size=batchSize, # Number of observations per batch
callbacks=callbacks_list # callbacks best model
#callbacks = [callback]
)
#loss = history.history['loss']
acc = history.history['mse']
acc_val = history.history['val_mse']
# save model
pp = self.results_folder + "model"
model.save(pp)
pp = self.results_folder + "acc.npy"
np.save(pp, acc)
pp = self.results_folder + "acc_val.npy"
np.save(pp, acc_val)
#return model, history
mas.plotModelRes(self.results_folder)
mas.save_model_summary(self.results_folder, model)
#---------------------------------------------------------------------------
def train_proj_V3(self):
train_X = transform_stack_to_stack_of_square_images(self.train_X)
valid_X = transform_stack_to_stack_of_square_images(self.valid_X)
s = train_X.shape
sV = valid_X.shape
x_train = np.zeros([s[0], s[1], s[2], 1])
x_val = np.zeros([sV[0], sV[1], sV[2], 1])
x_train[:,:,:,0] = train_X
x_val[:,:,:,0] = valid_X
x_train_tensor = tf.convert_to_tensor(x_train)
x_val_tensor = tf.convert_to_tensor(x_val)
base_model = MobileNetV3(num_classes = self.MobileNetV3_num_classes,
width_multiplier = self.MobileNetV3_width_multiplier,
l2_reg = self.MobileNetV3_l2_reg)
input_shape=(s[1],s[2],1)
input_tensor = tf.keras.layers.Input(shape=input_shape)
x = base_model(input_tensor)
x = base_model.output
#x = GlobalAveragePooling2D()(x)
preds = Dense(1)(x)
model = tf.keras.Model(inputs=[base_model.input],outputs=preds)
model.compile(
optimizer="adam",
loss="mse",
#loss = 'sparse_categorical_crossentropy',
metrics=["mse"])
save_model_summary(self.results_folder, model)
# best epoch callback
#filepath = self.results_folder+"weights_best.hdf5"
#checkpoint = ModelCheckpoint(filepath, monitor='val_mse', verbose=1, save_best_only=True, mode='min')
#callbacks_list = [checkpoint]
filepath = self.results_folder+"log_dir"
callbacks_list = [tf.keras.callbacks.TensorBoard(log_dir=filepath)]
stepsPerEpoch = int(s[0]/self.batch_size)
history = model.fit(x_train_tensor, # Features
self.train_Y, # Target vector
epochs=self.epos,#, # Number of epochs
validation_data=(x_val_tensor, self.valid_Y),
steps_per_epoch = stepsPerEpoch,
verbose=1, # Print description after each epoch
#batch_size=batchSize, # Number of observations per batch
callbacks=callbacks_list # callbacks best model
)
filepath = self.results_folder+"weights_best.hdf5"
model.save_weights(filepath)
#loss = history.history['loss']
acc = history.history['mse']
acc_val = history.history['val_mse']
# save model
pp = self.results_folder + "model"
model.save(pp)
pp = self.results_folder + "acc.npy"
np.save(pp, acc)
pp = self.results_folder + "acc_val.npy"
np.save(pp, acc_val)
mas.plotModelRes(self.results_folder)
mas.save_model_summary(self.results_folder, model)
#---------------------------------------------------------------------------
def train_image_V2(self):
s = self.train_X.shape
sV = self.valid_X.shape
x_train = np.zeros([s[0], s[1], s[2], 1])
x_val = np.zeros([sV[0], sV[1], sV[2], 1])
x_train[:,:,:,0] = self.train_X
x_val[:,:,:,0] = self.valid_X
x_train_tensor = tf.convert_to_tensor(x_train)
x_val_tensor = tf.convert_to_tensor(x_val)
#imports the MobileNetV2 model and discards the last 1000 neuron layer.
base_model=tf.keras.applications.MobileNetV2(weights=None,
include_top=False,
input_shape=(self.image_size,self.image_size,1))
x = base_model.output
x = GlobalAveragePooling2D()(x)
preds = Dense(1)(x)
model = tf.keras.Model(inputs=base_model.input,outputs=preds)
model.compile(optimizer = 'adam',
#loss = 'sparse_categorical_crossentropy',
loss = 'mse',
metrics = ['mse'])
#saveWeights(model, res_path)
save_model_summary(self.results_folder, model)
#best epoch callback
filepath = self.results_folder+"weights_best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_mse', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
stepsPerEpoch = int(s[0]/self.batch_size)
history = model.fit(x_train_tensor, # Features
self.train_Y, # Target vector
epochs=self.epos,#, # Number of epochs
validation_data=(x_val_tensor, self.valid_Y),
steps_per_epoch = stepsPerEpoch,
verbose=1, # Print description after each epoch
#batch_size=batchSize, # Number of observations per batch
callbacks=callbacks_list # callbacks best model
#callbacks = [callback]
)
#loss = history.history['loss']
acc = history.history['mse']
acc_val = history.history['val_mse']
# save model
pp = self.results_folder + "model"
model.save(pp)
pp = self.results_folder + "acc.npy"
        np.save(pp, acc)
# -*- coding: utf-8 -*-
"""This is a part of neuralflow package/EnergyModel class.
This source file contains template firing rate functions."""
import numpy as np
def custom(x,lambdafunc=None):
"""Custom fr model.
Either supply a function, or leave None if the model of the generated data is not known
Parameters
----------
x : numpy array, dtype=float
SEM grid points
lambdafunc : function.
The default is None.
Returns
-------
fr : numpy array
Firing rate function evaluated on SEM grid
"""
if lambdafunc is not None:
fr=lambdafunc(x)
else:
fr=None
return fr
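# Added example (sketch, not part of the original module): evaluating a
# user-supplied rate model on a small grid; the lambda is arbitrary.
def _example_custom_rate():
    x = np.linspace(-1.0, 1.0, 5)
    return custom(x, lambdafunc=lambda g: 100.0*np.exp(-g**2))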
def rectified_linear(x, r_slope=50.0, x_thresh=-1.0):
"""Rectified-linear firing rate model.
r(x, r_slope, x_thresh) = max[ r_slope*(x - x_thresh), 0 ]
Parameters
----------
x : numpy array, dtype=float
SEM grid points
r_slope : float
Firing-rate slope parameter. The default is 50.0.
    x_thresh : float
Firing threshold parameter. The default is -1.0.
Returns
-------
numpy array
Firing rate function evaluated on SEM grid
"""
return np.maximum(r_slope*(x-x_thresh),0.0)
def linear (x, r_slope = 50.0, r_bias = 2):
"""Linear firing rate model.
r(x, r_slope, r_bias) = max[ r_slope * x + r_bias, 0 ]
Parameters
----------
x : numpy array, dtype=float
SEM grid points
r_slope : float
Firing-rate slope parameter. The default is 50.0.
r_bias : float
        Firing-rate bias (offset) parameter. The default is 2.
Returns
-------
numpy array
Firing rate function evaluated on SEM grid
"""
return np.maximum(r_slope*x+r_bias,0.0)
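# Added example (sketch, not part of the original module): the two simple rate
# models evaluated on the same grid with their default parameters.
def _example_simple_rates():
    x = np.linspace(-1.0, 1.0, 5)
    return rectified_linear(x), linear(x)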
def peaks (x, center=np.array([-0.5,0.5]), width=np.array([0.2,0.3]), amp=np.array([1000,800])):
"""Sum of gaussian peaks
    f = SUM( A*exp(-(x-x0)^2/w^2) )
Parameters
----------
x : numpy array, dtype=float
SEM grid points
center : numpy array, dtype=float
Centers of Gaussian peaks. The default is np.array([-0.5,0.5]).
width : numpy array, dtype=float
Widths of Gaussian peaks. The default is np.array([0.2,0.3]).
amp : numpy array, dtype=float
Magnitudes of Gaussian peaks. The default is np.array([1000,800]).
Returns
-------
numpy array
Firing rate function evaluated on SEM grid
"""
if not isinstance(center, np.ndarray):
        center, width, amp = np.array([center]), np.array([width]), np.array([amp])
    fr = np.zeros_like(x)
    for i in range(center.size):
        fr += amp[i]*np.exp(-(x-center[i])**2/width[i]**2)
    return fr
'''
Created By ILMARE
@Date 2019-3-3
'''
import re
import os
import numpy as np
import cv2
from matplotlib import pyplot as plt
random_transform_args = {
'rotation_range': 10,
'zoom_range': 0.05,
'shift_range': 0.05,
'random_flip': 0.4,
}
def umeyama(src, dst, estimate_scale):
num = src.shape[0]
dim = src.shape[1]
src_mean = src.mean(axis=0)
dst_mean = dst.mean(axis=0)
src_demean = src - src_mean
dst_demean = dst - dst_mean
A = np.dot(dst_demean.T, src_demean) / num
d = np.ones((dim,), dtype=np.double)
if np.linalg.det(A) < 0:
d[dim - 1] = -1
T = np.eye(dim + 1, dtype=np.double)
U, S, V = np.linalg.svd(A)
rank = np.linalg.matrix_rank(A)
if rank == 0:
return np.nan * T
elif rank == dim - 1:
if np.linalg.det(U) * np.linalg.det(V) > 0:
T[:dim, :dim] = np.dot(U, V)
else:
s = d[dim - 1]
d[dim - 1] = -1
            T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V))
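            # --- Added sketch (not the author's code) -----------------------
            # The source is truncated here. The remaining steps of the standard
            # Umeyama similarity estimator (scikit-image formulation) would be
            # roughly:
            #
            #         d[dim - 1] = s
            #     else:
            #         T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V))
            #     if estimate_scale:
            #         scale = 1.0 / src_demean.var(axis=0).sum() * np.dot(S, d)
            #     else:
            #         scale = 1.0
            #     T[:dim, dim] = dst_mean - scale * np.dot(T[:dim, :dim], src_mean.T)
            #     T[:dim, :dim] *= scale
            #     return T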
""" A module containing LCPrimitive and its subclasses. They implement
components of a pulsar light curve. Includes primitives (Gaussian,
Lorentzian), etc. as well as more sophisticated holistic templates that
provide single-parameter (location) representations of the light curve.
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/pulsar/lcprimitives.py,v 1.35 2017/03/17 21:37:52 kerrm Exp $
author: <NAME> <<EMAIL>>
"""
# NB -- big TODO -- I don't think wrapped primitives quite correctly return
# Monte Carlo variables because they don't account for the uniform approx.
# perhaps this isn't a big deal
import numpy as np
from scipy.special import erf,i0,i1
from scipy.integrate import simps,quad
from scipy.interpolate import interp1d
from scipy.stats import norm,cauchy
from math import sin,cos,sinh,cosh,atan,tan
ROOT2PI = (2*np.pi)**0.5
R2DI = (2/np.pi)**0.5
ROOT2 = 2**0.5
TWOPI = (2*np.pi)
PI = np.pi*1
MAXWRAPS = 15
MINWRAPS = 3
WRAPEPS = 1e-8
# TODO -- possible "LCBase" class with certain method common to LCPrimitive and LCTemplate
def two_comp_mc(n,w1,w2,loc,func):
""" Helper function to generate MC photons from a two-sided
distribution.
NB -- this should work as is if w1,w2,loc are vectors.
n -- total number of photons
w1 -- scale parameter for func, lefthand peak
w2 -- scale parameter for func, righthand peak
loc -- position of peak
func -- an 'rvs' function from scipy
"""
frac1 = w1/(w1+w2)
# number of photons required from left side
n1 = (np.random.rand(n) < frac1).sum()
r1 = func(loc=0,scale=w1,size=n1)
# reflect and relocate photons to right or lef side
r1 = loc + np.where(r1<=0,r1,-r1)
r2 = func(loc=0,scale=w2,size=n-n1)
r2 = loc + np.where(r2>0,r2,-r2)
return np.mod(np.append(r1,r2),1)
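# Added example (sketch, not part of the original module): draw phases from an
# asymmetric Gaussian peak at phase 0.3 with left/right widths 0.02 and 0.05.
def _example_two_comp_mc(n=1000):
    return two_comp_mc(n, 0.02, 0.05, 0.3, norm.rvs)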
def approx_gradient(func,phases,log10_ens,eps=1e-6):
""" Return a numerical gradient. This works for both LCPrimitive and
LCTemplate objects. HOW AWESOME!
"""
orig_p = func.get_parameters(free=False).copy()
g = np.zeros([len(orig_p),len(phases)])
weights = np.asarray([-1,8,-8,1])/(12*eps)
def do_step(which,eps):
p0 = orig_p.copy()
p0[which] += eps
func.set_parameters(p0,free=False)
return func(phases,log10_ens)
for i in range(len(orig_p)):
# use a 4th-order central difference scheme
for j,w in zip([2,1,-1,-2],weights):
g[i,:] += w*do_step(i,j*eps)
func.set_parameters(orig_p,free=False)
return g
def check_gradient(func,atol=1e-8,rtol=1e-5,quiet=False):
""" Test gradient function with a set of MC photons.
This works with either LCPrimitive or LCTemplate objects.
TODO -- there is trouble with the numerical gradient when
a for the location-related parameters when the finite step
causes the peak to shift from one side of an evaluation phase
to the other."""
en = np.random.rand(1000)*2 + 1 # 100 MeV to 10 GeV
ph = func.random(en)
if hasattr(func,'closest_to_peak'):
eps = min(1e-6,0.2*func.closest_to_peak(ph))
else:
eps = 1e-6
g1 = func.gradient(ph,en,free=False)
g2 = func.approx_gradient(ph,en,eps=eps)
anyfail = False
for i in range(g1.shape[0]):
d1 = np.abs(g1[i]-g2[i])
a = np.argmax(d1)
fail = np.any(d1 > (atol + rtol*np.abs(g2)))
if not quiet:
pass_string = 'FAILED' if fail else 'passed'
print ('%02d (%s) %.3g (abs)'%(i,pass_string,d1.max()))
anyfail = anyfail or fail
return not anyfail
class Fittable(object):
# TODO
""" Base class for any object with fittable parameters.
Handle parameter names, etc. here?"""
def get_parameters(self): pass
def set_parameters(self): pass
def get_bounds(self): pass
def __call__(self,*args):
return self._call(*args)[self.free]
def _call(self,*args):
raise NotImplementedError('Child class instantiates.')
def grad(self,*args):
return self._grad(*args)[self.free]
def _grad(self,*args):
raise NotImplementedError('Child class instantiates.')
class LCPrimitive(object):
""" Base class for various components of a light curve. All "analytic"
light curve models must inherit and must implement the three
'virtual' functions below."""
def is_energy_dependent(self):
return False
def is_two_sided(self):
""" True if primitive is asymmetric. Default is False, two-sided
child classes should override."""
return False
def copy(self):
from copy import deepcopy
return deepcopy(self)
def __call__(self,phases):
raise NotImplementedError('Virtual function must be implemented by child class.')
def integrate(self,x1=0,x2=1,log10_ens=3):
""" Base implemention with scipy quad."""
f = lambda ph: self(ph,log10_ens)
return quad(f,x1,x2)[0]
def cdf(self,x,log10_ens=3):
return self.integrate(x1=0,x2=x,log10_ens=3)
def fwhm(self):
"""Return the full-width at half-maximum of the light curve model."""
return self.hwhm(0)+self.hwhm(1)
def hwhm(self,right=False):
"""Return the half-width at half-maximum of the light curve model."""
raise NotImplementedError('Virtual function must be implemented by child class.')
def init(self):
self.p = np.asarray([1])
self.pnames = []
self.name = 'Default'
self.shortname = 'None'
def _asarrays(self):
for key in ['p','free','bounds','errors','slope','slope_free']:
if hasattr(self,key):
v = self.__dict__[key]
if v is not None:
self.__dict__[key] = np.asarray(v,dtype=bool if 'free' in key else float)
def _default_bounds(self):
bounds = [[]] *len(self.p)
# this order works for LCHarmonic, too
bounds[0] = [0.005,0.5] # width
bounds[-1] = [-1,1] # position
if len(bounds) > 2:
bounds[1] = [0.005,0.5] # width
return bounds
def _default_priors(self):
loc = self.p.copy()
width = np.asarray([0.1]*len(self.p))
enable = np.asarray([False]*len(self.p))
return loc,width,enable
def __init__(self,**kwargs):
""" Generally, class-specific setup work is performed in init.
Here, init is called and certain guaranteed default members
are established."""
self.init()
if not hasattr(self,'bounds'):
self.bounds = self._default_bounds() # default
self.errors = np.zeros_like(self.p)
self.free = np.asarray([True]*len(self.p))
self.__dict__.update(kwargs)
self._asarrays()
self.gauss_prior_loc, self.gauss_prior_width, self.gauss_prior_enable = self._default_priors()
self.shift_mode = False
def _make_p(self,log10_ens=3):
""" Internal method to return parameters appropriate for use
in functional form."""
return [None] + list(self.p)
def set_parameters(self,p,free=True):
if free:
self.p[self.free] = p
else:
self.p[:] = p
# adjust position to be between 0 and 1
self.p[-1] = self.p[-1] % 1
return np.all(self.p >= 0)
def get_parameters(self,free=True):
if free:
return self.p[self.free]
return self.p
def get_parameter_names(self,free=True):
return [p for (p,b) in zip(self.pnames,self.free) if b]
def set_errors(self,errs):
n = self.free.sum()
self.errors[:] = 0.
self.errors[self.free] = errs[:n]
return n
def get_errors(self,free=True):
return self.errors[self.free]
def get_bounds(self): return self.bounds[self.free]
def get_gauss_prior_parameters(self):
mod_array = [False]*(len(self.p)-1)+[True]
return (
self.gauss_prior_loc[self.free],
self.gauss_prior_width[self.free],
np.asarray(mod_array)[self.free],
self.gauss_prior_enable[self.free],
)
def enable_gauss_prior(self,enable=True):
""" [Convenience] Turn on gaussian prior."""
self.gauss_prior_enable[:] = enable
def center_gauss_prior(self,enable=False):
""" [Convenience] Set gauss mode to current params."""
self.gauss_prior_loc[:] = self.p[:]
if enable: self.enable_gauss_prior()
def get_location(self,error=False):
if error: return np.asarray([self.p[-1],self.errors[-1]])
return self.p[-1]
def set_location(self,loc):
self.p[-1] = loc
def get_norm(self,error=False):
#if error: return np.asarray([self.p[0],self.errors[0]])
#return self.p[0]
return 1
def get_width(self,error=False,hwhm=False,right=False):
""" Return the width of the distribution.
Keyword arguments:
-----------------
error [False] if True, return tuple with value and error
hwhm [False] if True, scale width to be HWHM
right [False] if True, return "right" component, else "left".
There is no distinction for symmetric dists.
"""
scale = self.hwhm(right=right)/self.p[int(right)] if hwhm else 1
if error: return np.asarray([self.p[int(right)],self.errors[int(right)]])*scale
return self.p[int(right)]*scale
def get_gradient(self,phases,log10_ens=3):
raise DeprecationWarning()
return self.gradient(phases,log10_ens,free=True)
def gradient(self,phases,log10_ens=3,free=False):
raise NotImplementedError('No gradient function found for this object.')
def random(self,n):
""" Default is accept/reject."""
if n < 1: return 0
M = self(np.asarray([self.p[-1]])) # peak amplitude
rvals = np.empty(n)
position = 0
rfunc = np.random.rand
while True:
cand_phases = rfunc(n)
cand_phases = cand_phases[rfunc(n) < self(cand_phases)/M]
ncands = len(cand_phases)
if ncands == 0: continue
rvals[position:position + ncands] = cand_phases[:n-position]
position += ncands
if position >= n: break
return rvals
def __str__(self):
m=max([len(n) for n in self.pnames])
l = []
errors = self.errors if hasattr(self,'errors') else [0]*len(self.pnames)
for i in range(len(self.pnames)):
fstring = '' if self.free[i] else ' [FIXED]'
n=self.pnames[i][:m]
t_n = n+(m-len(n))*' '
            l += [t_n + ': %.4f +/- %.4f%s'%(self.p[i],errors[i],fstring)]
l = [self.name+'\n------------------'] + l
return '\n'.join(l)
def approx_gradient(self,phases,log10_ens=3,eps=1e-5):
return approx_gradient(self,phases,log10_ens,eps=eps)
def check_gradient(self,atol=1e-8,rtol=1e-5,quiet=False):
return check_gradient(self,atol=atol,rtol=rtol,quiet=quiet)
def sanity_checks(self,eps=1e-6):
""" A few checks on normalization, integration, etc. """
errfac = 1
# Normalization test
y,ye = quad(self,0,1)
#t1 = abs(self.p[0]-y)<(ye*errfac)
t1 = abs(1-y)<(ye*errfac)
# integrate method test
#t2 = abs(self.p[0]-self.integrate(0,1))<eps
t2 = abs(1-self.integrate(0,1))<eps
# FWHM test
t3 = (self(self.p[-1])*0.5-self(self.p[-1]-self.fwhm()/2))<eps
# gradient test
try:
t4 = self.check_gradient(quiet=True)
except: t4 = False
# boundary conditions
t5 = abs(self(0)-self(1-eps))<eps
if not t1: print ('Failed Normalization test')
if not t2: print ('Failed integrate method test')
if not t3: print ('Failed FWHM test')
if not t4: print ('Failed gradient test')
if not t5: print ('Did not pass boundary conditions')
return np.all([t1,t2,t3,t4,t5])
def eval_string(self):
""" Return a string that can be evaled to instantiate a nearly-
identical object."""
return '%s(p=%s,free=%s,slope=%s,slope_free=%s)'%(
self.__class__.__name__,str(list(self.p)),str(list(self.free)),
str(list(self.slope)) if hasattr(self,'slope') else None,
str(list(self.slope_free)) if hasattr(self,'slope_free') else None)
def dict_string(self):
""" Return a string to express the object as a dictionary that can
be easily instantiated using its keys."""
def pretty_list(l,places=5):
fmt = '%.'+'%d'%places+'f'
s = ', '.join([fmt%x for x in l])
return '['+s+']'
t = ['name = %s'%self.__class__.__name__,
'p = %s'%(pretty_list(self.p)),
'free = %s'%(str(list(self.free))),
'slope = %s'%(pretty_list(self.slope) if hasattr(self,'slope') else None),
'slope_free = %s'%(str(list(self.slope_free)) if hasattr(self,'slope_free') else None),
]
#return 'dict(\n'+'\n '.join(t)+'\n
return t
def closest_to_peak(self,phases):
""" Return the minimum distance between a member of the array of
phases and the position of the mode of the primitive."""
return np.abs(phases-self.get_location()).min()
def get_fixed_energy_version(self,log10_en=3):
return self
class LCWrappedFunction(LCPrimitive):
""" Super-class for profiles derived from wrapped functions.
While some distributions (e.g. the wrapped normal) converge
quickly, others (e.g. the wrapped Lorentzian) converge very slowly
and must be truncated before machine precision is reached.
In order to preserve normalization, the pdf is slightly adjusted:
f(phi) = sum_(i,-N,N,g(phi+i)) + (1 - int(phi,-N,N,g(phi)) ).
This introduces an additional parameteric dependence which must
be accounted for by computation of the gradient.
"""
def _norm(self,nwraps,log10_ens=3):
""" Compute the truncated portion of the template."""
#return self.p[0]-self.base_int(-nwraps,nwraps+1)
return 1-self.base_int(-nwraps,nwraps+1,log10_ens)
def _grad_norm(self,nwraps,log10_ens=3):
""" Compute the gradient terms due to truncated portion. That is,
since we add on a uniform component beyond nwraps, the
amplitude of this component depends on the CDF and hence on
the parameters.
Default implementation is to ignore these terms, applicable
for rapidly-converging distributions (e.g. wrapped normal with
small width parameter). On the other hand, it is not
negligible for long-tailed distributions, e.g. Lorentzians."""
return None
def __call__(self,phases,log10_ens=3):
""" Return wrapped template + DC component corresponding to truncation."""
results = self.base_func(phases,log10_ens)
for i in range(1,MAXWRAPS+1):
t = self.base_func(phases,log10_ens,index= i)
t += self.base_func(phases,log10_ens,index=-i)
results += t
if (i>=MINWRAPS) and (np.all(t < WRAPEPS)): break
return results+self._norm(i,log10_ens)
def gradient(self,phases,log10_ens=3,free=False):
""" Return the gradient evaluated at a vector of phases.
output : a num_parameter x len(phases) ndarray,
the num_parameter-dim gradient at each phase
"""
results = self.base_grad(phases,log10_ens)
for i in range(1,MAXWRAPS+1):
t = self.base_grad(phases,log10_ens,index=i)
t += self.base_grad(phases,log10_ens,index=-i)
results += t
if (i >= MINWRAPS) and (np.all(t < WRAPEPS)): break
gn = self._grad_norm(i,log10_ens)
if gn is not None:
for i in range(len(gn)):
results[i,:] += gn[i]
if free:
return results[self.free]
return results
def integrate(self,x1,x2,log10_ens=3):
#if(x1==0) and (x2==0): return 1.
# NB -- this method is probably overkill now.
results = self.base_int(x1,x2,log10_ens,index=0)
for i in range(1,MAXWRAPS+1):
t = self.base_int(x1,x2,log10_ens,index=i)
t += self.base_int(x1,x2,log10_ens,index=-i)
results += t
if np.all(t < WRAPEPS):
break
return results+(x2-x1)*self._norm(i,log10_ens)
def base_func(self,phases,log10_ens=3,index=0):
raise NotImplementedError(
'No base_func function found for this object.')
def base_grad(self,phases,log10_ens=3,index=0):
raise NotImplementedError(
'No base_grad function found for this object.')
def base_int(self,phases,log10_ens=3,index=0):
raise NotImplementedError(
'No base_int function found for this object.')
class LCGaussian(LCWrappedFunction):
""" Represent a (wrapped) Gaussian peak.
Parameters
Width the standard deviation parameter of the norm dist.
Location the mode of the Gaussian distribution
"""
def init(self):
self.p = np.asarray([0.03,0.5])
self.pnames = ['Width','Location']
self.name = 'Gaussian'
self.shortname = 'G'
def hwhm(self,right=False):
return self.p[0]*(2 * np.log(2))**0.5
def base_func(self,phases,log10_ens=3,index=0):
e,width,x0 = self._make_p(log10_ens)
z = (phases + index - x0)/width
return (1./(width*ROOT2PI))*np.exp(-0.5*z**2 )
def base_grad(self,phases,log10_ens=3,index=0):
e,width,x0 = self._make_p(log10_ens)
z = (phases + index - x0)/width
f = (1./(width*ROOT2PI))*np.exp(-0.5*z**2 )
return np.asarray([f/width*(z**2 - 1.),f/width*z])
def base_int(self,x1,x2,log10_ens=3,index=0):
e,width,x0 = self._make_p(log10_ens)
z1 = (x1 + index - x0)/width
z2 = (x2 + index - x0)/width
return 0.5*(erf(z2/ROOT2)-erf(z1/ROOT2))
def random(self,n):
if hasattr(n,'__len__'):
n = len(n)
return np.mod(norm.rvs(loc=self.p[-1],scale=self.p[0],size=n),1)
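# Added example (sketch, not part of the original module): a wrapped Gaussian
# peak of width 0.03 at phase 0.5 -- evaluate it, check that it integrates to
# ~1 over one cycle, and compare the analytic gradient with the numerical one.
def _example_lcgaussian():
    g = LCGaussian(p=[0.03, 0.5])
    vals = g(np.linspace(0, 1, 101))              # profile over one rotation
    norm_ok = abs(g.integrate(0, 1) - 1) < 1e-6   # wrapped pdf ~ unit normalized
    grad_ok = g.check_gradient(quiet=True)
    return vals, norm_ok, grad_ok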
class LCGaussian2(LCWrappedFunction):
""" Represent a (wrapped) two-sided Gaussian peak.
Parameters
Width1 the standard deviation parameter of the norm dist.
Width2 the standard deviation parameter of the norm dist.
Location the mode of the distribution
"""
def init(self):
self.p = np.asarray([0.03,0.03,0.5])
self.pnames = ['Width1','Width2','Location']
self.name = 'Gaussian2'
self.shortname = 'G2'
def is_two_sided(self):
return True
def hwhm(self,right=False):
return (self.p[int(right)])*(2 * np.log(2))**0.5
def base_func(self,phases,log10_ens=3,index=0):
e,width1,width2,x0 = self._make_p(log10_ens)
z = (phases + (index - x0))
z *= np.where(z <= 0, 1./width1, 1./width2)
return (R2DI/(width1+width2)) * np.exp(-0.5*z**2 )
def base_grad(self,phases,log10_ens=3,index=0):
e,width1,width2,x0 = self._make_p(log10_ens)
z = (phases + (index - x0))
m = (z <= 0)
w = np.where(m, width1, width2)
z /= w
f = (R2DI/(width1+width2)) * np.exp(-0.5*z**2 )
k = 1./(width1+width2)
z2w = z**2/w
t = f*(z2w-k)
g1 = f*(z2w*( m)-k)
g2 = f*(z2w*(~m)-k)
g3 = f*z/w
return np.asarray([g1,g2,g3])
def base_int(self,x1,x2,log10_ens=3,index=0):
e,width1,width2,x0 = self._make_p(log10_ens)
if index==0 and (x1 < x0) and (x2 > x0):
z1 = (x1 + index - x0)/width1
z2 = (x2 + index - x0)/width2
k1 = 2*width1/(width1+width2)
k2 = 2*width2/(width1+width2)
return 0.5*(k2*erf(z2/ROOT2)-k1*erf(z1/ROOT2))
w = width1 if ((x1+index) < x0) else width2
z1 = (x1 + index - x0)/w
z2 = (x2 + index - x0)/w
k = 2*w/(width1+width2)
return 0.5*k*(erf(z2/ROOT2)-erf(z1/ROOT2))
def random(self,n):
""" Use multinomial technique to return random photons from
both components."""
if hasattr(n,'__len__'):
n = len(n)
return two_comp_mc(n,self.p[0],self.p[1],self.p[-1],norm.rvs)
class LCLorentzian(LCPrimitive):
""" Represent a (wrapped) Lorentzian peak.
Parameters
Width the width paramater of the wrapped Cauchy distribution,
namely HWHM*2PI for narrow distributions
Location the center of the peak in phase
"""
def init(self):
self.p = np.asarray([0.1,0.5])
self.pnames = ['Width','Location']
self.name = 'Lorentzian'
self.shortname = 'L'
def hwhm(self,right=False):
# NB -- bounds on p[1] set such that this is well-defined
return np.arccos( 2-cosh(self.p[0]) )/TWOPI
def __call__(self,phases,log10_ens=3):
e,gamma,loc = self._make_p(log10_ens)
z = TWOPI*(phases-loc)
# NB -- numpy call not as efficient as math.sinh etc.
# but this allows easy inheritance for the energy-dependence
return np.sinh(gamma)/(np.cosh(gamma)-np.cos(z))
def gradient(self,phases,log10_ens=3,free=False):
e,gamma,loc = self._make_p(log10_ens)
z = TWOPI*(phases-loc)
s1 = np.sinh(gamma); c1 = np.cosh(gamma)
c = np.cos(z); s = np.sin(z)
f = s1/(c1-c)
f2 = f**2
g1 = f*(c1/s1) - f2
g2 = f2*(TWOPI/s1)*s
if free:
return np.asarray([g1,g2])[self.free]
return np.asarray([g1,g2])
def random(self,n):
if hasattr(n,'__len__'):
n = len(n)
return np.mod(cauchy.rvs(loc=self.p[-1],scale=self.p[0]/TWOPI,size=n),1)
def integrate(self,x1,x2,log10_ens=3):
# NB -- due to the use of tans below, must be careful to use an angle
# range of -pi/2 to pi/2 rather than 0 to pi as one would want
# I haven't carefully tested this solution
e,gamma,loc = self._make_p(log10_ens)
x1 = PI*(x1-loc)
x2 = PI*(x2-loc)
t = 1./np.tanh(0.5*gamma) # coth(gamma/2)
v2 = np.arctan(t*tan(x2))/PI
v1 = np.arctan(t*tan(x1))/PI
return (v2<=v1) + v2 - v1 # correction for tan wrapping
class LCLorentzian2(LCWrappedFunction):
""" Represent a (wrapped) two-sided Lorentzian peak.
Parameters
Width1 the HWHM of the distribution (left)
Width2 the HWHM of the distribution (right)
Location the mode of the distribution
"""
def init(self):
self.p = np.asarray([0.03,0.03,0.5])
self.pnames = ['Width1','Width2','Location']
self.name = 'Lorentzian2'
self.shortname = 'L2'
def is_two_sided(self):
return True
def hwhm(self,right=False):
return self.p[int(right)]
def _grad_norm(self,nwraps,log10_ens=3):
e,gamma1,gamma2,x0 = self._make_p(log10_ens)
z1 = (-nwraps-x0)/gamma1
z2 = (nwraps+1-x0)/gamma2
t = gamma2*np.arctan(z2)-gamma1*np.arctan(z1)
t1 = 1./(1+z1**2)
t2 = 1./(1+z2**2)
k = 2/(gamma1+gamma2)/PI
f = k*t
g1 = -1./(gamma1+gamma2)-(np.arctan(z1)-z1*t1)/t
g2 = -1./(gamma1+gamma2)+(np.arctan(z2)-z2*t2)/t
g3 = (t1-t2)/t
return [-f*g1,-f*g2,-f*g3]
def base_func(self,phases,log10_ens=3,index=0):
e,gamma1,gamma2,x0 = self._make_p(log10_ens)
z = (phases + (index - x0))
z *= np.where(z<=0, 1./gamma1, 1./gamma2)
k = 2/(gamma1+gamma2)/PI
return k/(1+z**2)
def base_grad(self,phases,log10_ens=3,index=0):
e,gamma1,gamma2,x0 = self._make_p(log10_ens)
z = (phases + (index - x0))
m = z < 0
g = np.where(m,1./gamma1,1./gamma2)
t1 = 1+(z*g)**2
t2 = 2*(z*g)/t1
g1 = -1/(gamma1+gamma2)+t2*((m*z)/gamma1**2)
g2 = -1/(gamma1+gamma2)+t2*((~m*z)/gamma2**2)
g3 = t2*g
f = (2./(gamma1+gamma2)/PI)/t1
return np.asarray([f*g1,f*g2,f*g3])
def base_int(self,x1,x2,log10_ens=3,index=0):
gamma1,gamma2,x0 = self.p
# the only case where g1 and g2 can be different is if we're on the
# 0th wrap, i.e. index=0; this also includes the case when we want
# to use base_int to do a "full" integral
if index==0 and (x1 < x0) and (x2 > x0):
g1,g2 = gamma1,gamma2
else:
g1,g2 = [gamma1]*2 if ((x1+index) < x0) else [gamma2]*2
z1 = (x1 + index - x0)/g1
z2 = (x2 + index - x0)/g2
k = (2./(gamma1+gamma2)/PI)
return k*(g2*atan(z2)-g1*atan(z1))
def random(self,n):
""" Use multinomial technique to return random photons from
both components."""
return two_comp_mc(n,self.p[0],self.p[1],self.p[-1],cauchy.rvs)
class LCVonMises(LCPrimitive):
""" Represent a peak from the von Mises distribution. This function is
used in directional statistics and is naturally wrapped.
Parameters:
Width inverse of the 'kappa' parameter in the std. def.
Location the center of the peak in phase
"""
def init(self):
self.p = np.asarray([0.05,0.5])
self.pnames = ['Width','Location']
self.name = 'VonMises'
self.shortname = 'VM'
def hwhm(self,right=False):
return 0.5*np.arccos(self.p[0]*np.log(0.5)+1)/TWOPI
def __call__(self,phases,log10_ens=3):
e,width,loc = self._make_p(log10_ens)
z = TWOPI*(phases-loc)
return np.exp(np.cos(z)/width)/i0(1./width)
def gradient(self,phases,log10_ens=3,free=False):
e,width,loc = self._make_p(log10_ens)
my_i0 = i0(1./width)
my_i1 = i1(1./width)
z = TWOPI*(phases-loc)
cz = np.cos(z)
sz = np.sin(z)
f = (np.exp(cz)/width)/my_i0
return np.asarray([-cz/width**2*f,TWOPI*(sz/width+my_i1/my_i0)*f])
class LCKing(LCWrappedFunction):
""" Represent a (wrapped) King function peak.
Parameters
Sigma the width parameter
Gamma the tail parameter
Location the mode of the distribution
"""
# NOTES -- because we don't integrate over solid angle, the norm
# integral / jacobean for the usual King function isn't trivial;
# need to see if this is a show stopper
def init(self):
        self.p = np.asarray([0.03, 2.0, 0.5])  # three entries to match pnames; the Gamma default (2.0) is a nominal guess
self.pnames = ['Sigma','Gamma','Location']
self.name = 'King'
self.shortname = 'K'
def hwhm(self,right=False):
raise NotImplementedError()
return self.p[0]*(2 * np.log(2))**0.5
def base_func(self,phases,log10_ens=3,index=0):
e,s,g,x0 = self._make_p(log10_ens)
z = phases+index-x0
u = 0.5*(z/s)**2
return (g-1)/g*(1.+u/g)**-g
def base_grad(self,phases,log10_ens=3,index=0):
raise NotImplementedError()
e,width,x0 = self._make_p(log10_ens)
z = (phases + index - x0)/width
f = (1./(width*ROOT2PI))*np.exp(-0.5*z**2 )
return np.asarray([f/width*(z**2 - 1.),f/width*z])
def base_int(self,x1,x2,log10_ens=3,index=0):
e,s,g,x0 = self._make_p(log10_ens)
z1 = x1 + index - x0
z2 = x2 + index - x0
u1 = 0.5*((x1 + index - x0)/s)**2
u2 = 0.5*((x2 + index - x0)/s)**2
f1 = 1-(1.+u1/g)**(1-g)
f2 = 1-(1.+u2/g)**(1-g)
if (z1*z2<0): # span the peak
return 0.5*(f1+f2)
if z1 < 0:
return 0.5*(f1-f2)
return 0.5*(f2-f1)
def random(self,n):
raise NotImplementedError()
if hasattr(n,'__len__'):
n = len(n)
return np.mod(norm.rvs(loc=self.p[-1],scale=self.p[0],size=n),1)
class LCTopHat(LCPrimitive):
""" Represent a top hat function.
Parameters:
Width right edge minus left edge
Location center of top hat
"""
def init(self):
self.p = np.asarray([0.03,0.5])
self.pnames = ['Width','Location']
self.name = 'TopHat'
self.shortname = 'TH'
self.fwhm_scale = 1
def hwhm(self,right=False):
return self.p[0]/2
def __call__(self,phases,wrap=True):
width,x0 = self.p
return np.where(np.mod(phases - x0 + width/2,1) < width,1./width,0)
def random(self,n):
if hasattr(n,'__len__'):
n = len(n)
return np.mod(
np.random.rand(n)*self.p[0]+self.p[-1]-self.p[0]/2,1)
class LCHarmonic(LCPrimitive):
"""Represent a sinusoidal shape corresponding to a harmonic in a Fourier expansion.
Parameters:
Location the phase of maximum
"""
def init(self):
self.p = np.asarray([0.])
self.order = 1
self.pnames = ['Location']
self.name = 'Harmonic'
self.shortname = 'H'
def __call__(self,phases,log10_ens=3):
e,x0 = self._make_p(log10_ens)
return 1+np.cos( (TWOPI*self.order) * (phases - x0 ) )
def integrate(self,x1,x2,log10_ens=3):
e,x0 = self._make_p(log10_ens)
t = self.order*TWOPI
return (x2-x1)+(np.sin(t*(x2-x0))-np.sin(t*(x1-x0)))/t
class LCEmpiricalFourier(LCPrimitive):
""" Calculate a Fourier representation of the light curve.
The only parameter is an overall shift.
Cannot be used with other LCPrimitive objects!
Parameters:
Shift : overall shift from original template phase
"""
def init(self):
self.nharm = 20
self.p = np.asarray([0.])
self.free = np.asarray([True])
self.pnames= ['Shift']
self.name = 'Empirical Fourier Profile'
self.shortname = 'EF'
self.shift_mode = True
def __init__(self,phases=None,input_file=None,**kwargs):
"""Must provide either phases or a template input file!"""
self.init()
self.__dict__.update(kwargs)
if input_file is not None: self.from_file(input_file)
if phases is not None: self.from_phases(phases)
def from_phases(self,phases):
n = float(len(phases))
harmonics = np.arange(1,self.nharm+1)*(2*np.pi)
self.alphas = np.asarray([(np.cos(k*phases)).sum() for k in harmonics])
self.betas = np.asarray([(np.sin(k*phases)).sum() for k in harmonics])
self.alphas /= n; self.betas /= n;
self.harmonics = harmonics
def from_file(self,input_file):
if type(input_file) == type(''):
            toks = [line.strip().split() for line in open(input_file) if len(line.strip()) > 0 and '#' not in line]
else: toks = input_file
alphas = []
betas = []
for tok in toks:
if len(tok) != 2: continue
try:
a = float(tok[0])
b = float(tok[1])
alphas += [a]
betas += [b]
except: pass
n = len(alphas)
self.alphas = np.asarray(alphas)
self.betas = np.asarray(betas)
self.nharm = n
self.harmonics = np.arange(1,n+1)*(2*np.pi)
def to_file(self,output_file):
        f = open(output_file,'w')
f.write('# fourier\n')
for i in range(self.nharm):
f.write('%s\t%s\n'%(self.alphas[i],self.betas[i]))
def __call__(self,phases):
shift = self.p[0] ; harm = self.harmonics
if shift != 0:
""" shift theorem, for real coefficients
It's probably a wash whether it is faster to simply
subtract from the phases, but it's more fun this way! """
            c = np.cos(harm * shift)
            s = np.sin(harm * shift)
a = c*self.alphas - s*self.betas
b = s*self.alphas + c*self.betas
else: a,b = self.alphas,self.betas
ak = np.asarray([np.cos(phases*k) for k in harm]).transpose()
bk = np.asarray([np.sin(phases*k) for k in harm]).transpose()
return (1 + 2*(a*ak + b*bk).sum(axis=1))
def integrate(self,x1,x2):
""" The Fourier expansion by definition includes the entire signal, so
the norm is always unity."""
return 1
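# Added example (sketch, not part of the original module): build an empirical
# Fourier profile directly from a set of photon phases and evaluate it.
def _example_empirical_fourier():
    ph = np.random.rand(5000)              # stand-in for observed phases
    ef = LCEmpiricalFourier(phases=ph)     # fits 20 harmonics by default
    return ef(np.linspace(0, 1, 11))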
class LCKernelDensity(LCPrimitive):
""" Calculate a kernel density estimate of the light curve.
The bandwidth is empirical, determined from examining several pulsars.
The only parameter is an overall shift.
Cannot be used with other LCPrimitive objects!
Parameters:
Shift : overall shift from original template phase
"""
def init(self):
self.bw = None
self.use_scale = True
self.max_contrast = 1
self.resolution = 0.001 #interpolation sampling resolution
self.p = np.asarray([0.])
self.free = np.asarray([True])
self.pnames= ['Shift']
self.name = 'Gaussian Kernel Density Estimate'
self.shortname = 'KD'
self.shift_mode = True
def __init__(self,phases=None,input_file=None,**kwargs):
"""Must provide either phases or a template input file!"""
self.init()
self.__dict__.update(kwargs)
if input_file is not None: self.from_file(input_file)
if phases is not None: self.from_phases(phases)
def from_phases(self,phases):
n = len(phases)
# put in "ideal" HE bins after initial calculation of pulsed fraction
# estimate pulsed fraction
h = np.histogram(phases, bins = 100)
o = np.sort(h[0])
p = float((o[o > o[15]] - o[15]).sum()) / o.sum() # based on ~30% clean offpulse
b = o[15]
if self.bw is None:
self.bw = (0.5 * (p**2 * n)**-0.2)/(2*np.pi)
print (p,self.bw)
local_p = np.maximum(h[0] - b,0).astype(float) / h[0]
print (local_p, b)
bgbw = ((1-p)**2*n)**-0.2/(2*np.pi)
print (bgbw)
self.bw = np.minimum((local_p**2 * h[0])**-0.2/100.,bgbw)
keys = np.searchsorted(h[1],phases)
keys[keys==len(h[0])] = len(h[0]) - 1
bw = self.bw[keys]
print (len(phases),len(bw),type(bw))
phases = phases.copy()
self.phases = phases
self.phases.sort()
phases = np.asarray(phases)
self.phases = np.asarray(phases)
print (type(self.phases),type(phases))
hi_mask = np.asarray(phases > 0.9)
lo_mask = np.asarray(phases < 0.1)
self.num = len(phases)
self.phases = np.concatenate([phases[hi_mask]-1,phases])
        self.phases = np.concatenate([self.phases, 1+phases[lo_mask]])
def test_add():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = cle.push(np.asarray([[4, 5, 6]]))
reference = cle.push(np.asarray([[5, 7, 9]]))
output = input1 + input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_add_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[6, 7, 8]]))
output = input1 + input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_add_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[6, 4, -6]]))
output = input1 + input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_iadd():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = cle.push(np.asarray([[4, 5, 6]]))
reference = cle.push(np.asarray([[5, 7, 9]]))
input1 += input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_iadd_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[6, 7, 8]]))
input1 += input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_iadd_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[6, 4, -6]]))
input1 += input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_subtract():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = cle.push(np.asarray([[1, 5, 6]]))
reference = cle.push(np.asarray([[3, -3, -3]]))
output = input1 - input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_subtract_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[-1, -3, -2]]))
output = input1 - input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_subtract_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = np.asarray([[1, 5, 6]])
reference = cle.push(np.asarray([[3, -3, -3]]))
output = input1 - input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_isubtract():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = cle.push(np.asarray([[1, 5, 6]]))
reference = cle.push(np.asarray([[3, -3, -3]]))
input1 -= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_isubtract_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[-1, -3, -2]]))
input1 -= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_isubtract_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = np.asarray([[1, 5, 6]])
reference = cle.push(np.asarray([[3, -3, -3]]))
input1 -= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_divide():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[2, 1, -4]]))
output = input1 / input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_divide_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[2, 1, -4]]))
output = input1 / input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_divide_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[2, 1, -4]]))
output = input1 / input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_idivide():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[2, 1, -4]]))
input1 /= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_idivide_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[2, 1, -4]]))
input1 /= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_idivide_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[2, 1, -4]]))
input1 /= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_multiply():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[8, 4, -16]]))
output = input1 * input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_multiply_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[8, 4, -16]]))
output = input1 * input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_multiply_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[8, 4, -16]]))
output = input1 * input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_imultiply():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[8, 4, -16]]))
input1 *= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_imultiply_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[8, 4, -16]]))
input1 *= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_imultiply_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[8, 4, -16]]))
input1 *= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_gt():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[1, 0, 0]]))
output = input1 > input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_gt_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[1, 0, 0]]))
output = input1 > input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_gt_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[1, 0, 0]]))
output = input1 > input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_ge():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[1, 1, 0]]))
output = input1 >= input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_ge_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[1, 1, 0]]))
output = input1 >= input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_ge_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[1, 1, 0]]))
output = input1 >= input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_lt():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[0, 0, 1]]))
output = input1 < input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_lt_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[0, 0, 1]]))
output = input1 < input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_lt_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[0, 0, 1]]))
output = input1 < input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_le():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[0, 1, 1]]))
output = input1 <= input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_le_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[0, 1, 1]]))
output = input1 <= input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_le_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
    input2 = np.asarray([[2, 2, 2]])
    reference = cle.push(np.asarray([[0, 1, 1]]))
    output = input1 <= input2
    result = cle.pull(output)
    print(result)
    assert np.array_equal(result, reference)
"""
Utilities for fitting stacked (drizzled) spectra
"""
from collections import OrderedDict
from imp import reload
import astropy.io.fits as pyfits
import astropy.units as u
import numpy as np
from . import utils
from .utils import GRISM_COLORS, GRISM_MAJOR, GRISM_LIMITS, DEFAULT_LINE_LIST
from .fitting import GroupFitter
def make_templates(grism='G141', return_lists=False, fsps_templates=False,
line_list=DEFAULT_LINE_LIST):
"""Generate template savefile
This script generates the template sets with the emission line
complexes and with individual lines.
Parameters
----------
grism : str
Grism of interest, which defines what FWHM to use for the line
templates.
return_lists : bool
Return the templates rather than saving them to a file
Returns
-------
t_complexes, t_lines : list
        If `return_lists` is True, return two lists of templates. Otherwise,
store them to a `~numpy` save file "templates_{fwhm}.npy".
"""
from .multifit import MultiBeam
if grism == 'G141': # WFC3/IR
fwhm = 1100
elif grism == 'G800L': # ACS/UVIS
fwhm = 1400
elif grism == 'G280': # WFC3/UVIS
fwhm = 1500
elif grism == 'GRISM': # WFIRST
fwhm = 350
else:
fwhm = 700 # G102
# Line complex templates
t_complexes = utils.load_templates(fwhm=fwhm, line_complexes=True,
fsps_templates=fsps_templates)
# Individual lines
# line_list = ['SIII', 'SII', 'Ha', 'OI-6302', 'OIII', 'Hb',
# 'OIII-4363', 'Hg', 'Hd', 'NeIII', 'OII', 'MgII']
t_lines = utils.load_templates(fwhm=fwhm, line_complexes=False,
full_line_list=line_list,
fsps_templates=fsps_templates)
if return_lists:
return t_complexes, t_lines
else:
# Save them to a file
np.save('templates_{0}.npy'.format(fwhm), [t_complexes, t_lines])
print('Wrote `templates_{0}.npy`'.format(fwhm))
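# Usage sketch (comment only; assumes grizli and its FSPS template data are
# installed, and mirrors the docstring above rather than adding new API):
#
#   t_complexes, t_lines = make_templates(grism='G141', return_lists=True)
#   # or, with return_lists=False, reload the saved file later via
#   # t_complexes, t_lines = np.load('templates_1100.npy', allow_pickle=True)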
class StackFitter(GroupFitter):
def __init__(self, files='gnt_18197.stack.fits', group_name=None, sys_err=0.02, mask_min=0.1, fit_stacks=True, fcontam=1, PAs=None, extensions=None, min_ivar=0.01, overlap_threshold=3, verbose=True, eazyp=None, eazy_ix=0, MW_EBV=0., chi2_threshold=1.5, min_DoF=200):
"""Object for fitting stacked spectra.
Parameters
----------
files : str or list of str
Stack FITS filename. If a list is supplied, e.g., the product
of a `~glob` command, then append all specified files.
group_name : str
Rootname to associate with the object. If none, then default to
`files`.
sys_err : float
Minimum systematic error, interpreted as a fractional error.
The adjusted variance is taken to be
>>> var = var0 + (sys_err*flux)**2
mask_min : float
Only fit 2D pixels where the flat-flambda model has pixel values
greater than `mask_min` times the maximum of the model.
fit_stacks : bool
Fit the stacks of each grism combined from all available PAs. If
False, then fit the PAs individually.
fcontam : float
Parameter to control weighting of contaminated pixels for
`fit_stacks=False`.
"""
if isinstance(files, list):
file=files[0]
else:
file=files
self.files = [file]
if group_name is not None:
self.group_name = group_name
else:
self.group_name = file
if verbose:
print('Load file {0}'.format(file))
self.file = file
self.hdulist = pyfits.open(file)
self.min_ivar = min_ivar
self.sys_err = sys_err
self.fcontam = fcontam
self.MW_EBV = MW_EBV
self.h0 = self.hdulist[0].header.copy()
#self.Ngrism = self.h0['NGRISM']
self.grisms = []
self.ext = []
for i in range(self.h0['NGRISM']):
g = self.h0['GRISM{0:03d}'.format(i+1)]
self.grisms.append(g)
if fit_stacks:
if extensions is not None:
if g not in extensions:
continue
self.ext.append(g)
else:
ng = self.h0['N{0}'.format(g)]
for j in range(ng):
pa = self.h0['{0}{1:02d}'.format(g, j+1)]
if PAs is not None:
if pa not in PAs:
continue
ext = '{0},{1}'.format(g,pa)
if extensions is not None:
if ext not in extensions:
continue
self.ext.append(ext)
self.N = len(self.ext)
self.beams = []
pop = []
for i in range(self.N):
E_i = StackedSpectrum(file=self.file, sys_err=sys_err,
mask_min=mask_min, extver=self.ext[i],
mask_threshold=-1, fcontam=fcontam,
min_ivar=min_ivar, MW_EBV=MW_EBV)
E_i.compute_model()
if np.isfinite(E_i.kernel.sum()) & (E_i.DoF >= min_DoF):
self.beams.append(E_i)
else:
pop.append(i)
for i in pop[::-1]:
self.N -= 1
p = self.ext.pop(i)
# Get some parameters from the beams
self.id = self.h0['ID']
self.ra = self.h0['RA']
self.dec = self.h0['DEC']
self.Asave = {}
## Photometry
self.is_spec = 1
self.Nphot = 0
## Parse the beam data
self._parse_beams_list()
if not fit_stacks:
# self.mask_drizzle_overlaps(threshold=overlap_threshold,
# verbose=verbose)
if chi2_threshold > 0:
orig_ext = [e for e in self.ext]
fit_log, keep_dict, has_bad = self.check_for_bad_PAs(poly_order=3, chi2_threshold=chi2_threshold, fit_background=True, reinit=True, verbose=False)
if has_bad & verbose:
print('Found bad PA. New list: {0}'.format(keep_dict))
if verbose:
print(' {0}'.format(' '.join(self.ext)))
# Read multiple
if isinstance(files, list):
if len(files) > 1:
for file in files[1:]:
extra = StackFitter(files=file, sys_err=sys_err, mask_min=mask_min, fit_stacks=fit_stacks, fcontam=fcontam, PAs=PAs, extensions=extensions, min_ivar=min_ivar, overlap_threshold=overlap_threshold, eazyp=eazyp, eazy_ix=eazy_ix, chi2_threshold=chi2_threshold, verbose=verbose)
self.extend(extra)
self.idf = np.hstack([b.scif*0+ib for ib, b in enumerate(self.beams)])
self.idf = np.cast[int](self.idf)
# if eazyp is not None:
# self.eazyp = eazyp
#
# # TBD: do matching to eazyp.cat directly?
# self.eazy_ix = eazy_ix
#
# ok_phot = (eazyp.efnu[eazy_ix,:] > 0) & (eazyp.fnu[eazy_ix,:] > eazyp.param['NOT_OBS_THRESHOLD']) & np.isfinite(eazyp.fnu[eazy_ix,:]) & np.isfinite(eazyp.efnu[eazy_ix,:])
# ok_phot = np.squeeze(ok_phot)
# self.ok_phot = ok_phot
#
# self.Nphot = ok_phot.sum()
# if self.Nphot > 0:
#
# # F-lambda photometry, 1e-19 erg/s/cm2/A
# self.photom_eflam = (eazyp.efnu[eazy_ix,:]*eazyp.to_flam*eazyp.zp*eazyp.ext_corr/100.)[ok_phot]
# self.photom_flam = (eazyp.fnu[eazy_ix,:]*eazyp.to_flam*eazyp.zp*eazyp.ext_corr/100.)[ok_phot]
# self.photom_lc = eazyp.lc[ok_phot]
#
# self.scif = np.hstack((self.scif, self.photom_flam))
# self.ivarf = np.hstack((self.ivarf, 1/self.photom_eflam**2))
# self.sivarf = np.hstack((self.sivarf, 1/self.photom_eflam))
# self.wavef = np.hstack((self.wavef, self.photom_lc))
#
# self.weightf = np.hstack((self.weightf, np.ones(self.Nphot)))
# self.fit_mask = np.hstack((self.fit_mask, np.ones(self.Nphot, dtype=bool)))
# self.DoF += self.Nphot
# self.phot_scale = np.array([10.])
def _parse_beams_list(self):
"""
"""
# Parse from self.beams list
self.N = len(self.beams)
self.ext = [E.extver for E in self.beams]
self.Ngrism = OrderedDict()
for beam in self.beams:
if beam.grism in self.Ngrism:
self.Ngrism[beam.grism] += 1
else:
self.Ngrism[beam.grism] = 1
# Make "PA" attribute
self.PA = OrderedDict()
for g in self.Ngrism:
self.PA[g] = OrderedDict()
for i in range(self.N):
grism = self.ext[i].split(',')[0]
if ',' in self.ext[i]:
PA = float(self.ext[i].split(',')[1])
else:
PA = 0
if PA in self.PA[grism]:
self.PA[grism][PA].append(i)
else:
self.PA[grism][PA] = [i]
self.grisms = list(self.PA.keys())
self.Ntot = | np.sum([E.size for E in self.beams]) | numpy.sum |
#!/usr/bin/env python
"""
MagPy-General: Standard pymag package containing the following classes:
Written by <NAME>, <NAME> 2011/2012/2013/2014
Written by <NAME>, <NAME>, <NAME> 2015/2016
Version 0.3 (starting May 2016)
License:
https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import logging
import os
import sys
import tempfile
# ----------------------------------------------------------------------------
# Part 1: Import routines for packages
# ----------------------------------------------------------------------------
logpygen = '' # temporary logger variable
badimports = [] # List of missing packages
nasacdfdir = "c:\CDF Distribution\cdf33_1-dist\lib"
# Logging
# ---------
# Select the user's home directory (platform independent) or environment path
if "MAGPY_LOG_PATH" in os.environ:
path_to_log = os.environ["MAGPY_LOG_PATH"]
if not os.path.exists(path_to_log):
os.makedirs(path_to_log)
else:
path_to_log = tempfile.gettempdir()
def setup_logger(name, warninglevel=logging.WARNING, logfilepath=path_to_log,
logformat='%(asctime)s %(levelname)s - %(name)-6s - %(message)s'):
"""Basic setup function to create a standard logging config. Default output
is to file in /tmp/dir."""
logfile=os.path.join(logfilepath,'magpy.log')
# Check file permission/existance
if not os.path.isfile(logfile):
pass
else:
if os.access(logfile, os.W_OK):
pass
else:
for count in range (1,100):
logfile=os.path.join(logfilepath,'magpy{:02}.log'.format(count))
value = os.access(logfile, os.W_OK)
if value or not os.path.isfile(logfile):
count = 100
break
try:
logging.basicConfig(filename=logfile,
filemode='w',
format=logformat,
level=logging.INFO)
except:
logging.basicConfig(format=logformat,
level=logging.INFO)
logger = logging.getLogger(name)
# Define a Handler which writes "setLevel" messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(warninglevel)
logger.addHandler(console)
return logger
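# Usage sketch: submodules create their own named loggers with this helper and
# may request a stricter console level, e.g.
#   sublogger = setup_logger('magpy.stream', warninglevel=logging.ERROR)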
# Package loggers to identify info/problem source
logger = setup_logger(__name__)
# DEPRECATED: replaced by individual module loggers, delete these when sure they're no longer needed:
loggerabs = logging.getLogger('abs')
loggertransfer = logging.getLogger('transf')
loggerdatabase = logging.getLogger('db')
loggerstream = logging.getLogger('stream')
loggerlib = logging.getLogger('lib')
loggerplot = logging.getLogger('plot')
# Special loggers for event notification
stormlogger = logging.getLogger('stream')
logger.info("Initiating MagPy...")
from magpy.version import __version__
logger.info("MagPy version "+str(__version__))
magpyversion = __version__
# Standard packages
# -----------------
try:
import csv
import pickle
import types
import struct
import re
import time, string, os, shutil
#import locale
import copy as cp
import fnmatch
import dateutil.parser as dparser
from tempfile import NamedTemporaryFile
import warnings
from glob import glob, iglob, has_magic
from itertools import groupby
import operator # used for stereoplot legend
from operator import itemgetter
# The following packages are not identically available for python3
try: # python2
import copy_reg as copyreg
except ImportError: # python3
import copyreg as copyreg
# Python 2 and 3: alternative 4
try:
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request, ProxyHandler, install_opener, build_opener
from urllib.error import HTTPError
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError, ProxyHandler, install_opener, build_opener
"""
try: # python2
import urllib2
except ImportError: # python3
import urllib.request
"""
try: # python2
import thread
except ImportError: # python3
import _thread
try: # python2
from StringIO import StringIO
pyvers = 2
except ImportError: # python 3
from io import StringIO
pyvers = 3
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: standard packages.\n"
badimports.append(e)
# operating system
try:
PLATFORM = sys.platform
logger.info("Running on platform: {}".format(PLATFORM))
except:
PLATFORM = 'unknown'
# Matplotlib
# ----------
try:
import matplotlib
gui_env = ['TKAgg','GTKAgg','Qt4Agg','WXAgg','Agg']
try:
if not os.isatty(sys.stdout.fileno()): # checks if stdout is connected to a terminal (if not, cron is starting the job)
logger.info("No terminal connected - assuming cron job and using Agg for matplotlib")
gui_env = ['Agg','TKAgg','GTKAgg','Qt4Agg','WXAgg']
matplotlib.use('Agg') # For using cron
except:
logger.warning("Problems with identfying cron job - windows system?")
pass
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: problem with matplotlib.\n"
badimports.append(e)
try:
version = matplotlib.__version__.replace('svn', '')
try:
version = map(int, version.replace("rc","").split("."))
MATPLOTLIB_VERSION = list(version)
except:
version = version.strip("rc")
MATPLOTLIB_VERSION = version
logger.info("Loaded Matplotlib - Version %s" % str(MATPLOTLIB_VERSION))
for gui in gui_env:
try:
logger.info("Testing backend {}".format(gui))
try: # will be important from matplotlib3.3 onwards
matplotlib.use(gui, force=True)
except:
matplotlib.use(gui, warn=False, force=True)
from matplotlib import pyplot as plt
break
except:
continue
logger.info("Using backend: {}".format(matplotlib.get_backend()))
from matplotlib.colors import Normalize
from matplotlib.widgets import RectangleSelector, RadioButtons
#from matplotlib.colorbar import ColorbarBase
from matplotlib import mlab
from matplotlib.dates import date2num, num2date
import matplotlib.cm as cm
from pylab import *
from datetime import datetime, timedelta
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError with matplotlib package. Please install to proceed.\n"
logpygen += " ... if installed please check the permissions on .matplotlib in your homedirectory.\n"
badimports.append(e)
# Numpy & SciPy
# -------------
try:
logger.info("Loading Numpy and SciPy...")
import numpy as np
import scipy as sp
from scipy import interpolate
from scipy import stats
from scipy import signal
from scipy.interpolate import UnivariateSpline
from scipy.ndimage import filters
import scipy.optimize as op
import math
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: Python numpy-scipy required - please install to proceed.\n"
badimports.append(e)
# NetCDF
# ------
try:
#print("Loading Netcdf4 support ...")
from netCDF4 import Dataset
except ImportError as e:
#logpygen += "MagPy initiation ImportError: NetCDF not available.\n"
#logpygen += "... if you want to use NetCDF format support please install a current version.\n"
#badimports.append(e)
pass
# NASACDF - SpacePy
# -----------------
def findpath(name, path):
for root, dirs, files in os.walk(path):
if name in files:
return root
try:
logger.info("Loading SpacePy package cdf support ...")
try:
# check for windows
nasacdfdir = findpath('libcdf.dll', r'C:\CDF_Distribution') ## new path since nasaCDF3.6
if not nasacdfdir:
nasacdfdir = findpath('libcdf.dll', r'C:\CDF Distribution')
if nasacdfdir:
os.environ["CDF_LIB"] =str(nasacdfdir)
logger.info("Using CDF lib in %s" % nasacdfdir)
try:
import spacepy.pycdf as cdf
logger.info("... success")
except KeyError as e:
# Probably running at boot time - spacepy HOMEDRIVE cannot be detected
badimports.append(e)
except:
logger.info("... Could not import spacepy")
pass
else:
# create exception and try linux
x=1/0
except:
os.putenv("CDF_LIB", "/usr/local/cdf/lib")
logger.info("using CDF lib in /usr/local/cdf")
### If files (with tt_2000) have been generated with an outdated leapsecondtable
### an exception will occur - to prevent that:
### 1. make sure to use an up-to-date leapsecond table - update cdf regularly
### 2. temporarily set the cdf_validate environment variable to no
# Option 2 is applied below. TODO -- expose this as an initialization option,
# since updating cdf is the proper fix rather than just deactivating the error message
os.putenv("CDF_VALIDATE", "no")
logger.info("... deactivating cdf validation")
try:
import spacepy.pycdf as cdf
logger.info("... success")
except KeyError as e:
# Probably running at boot time - spacepy HOMEDRIVE cannot be detected
badimports.append(e)
except:
logger.info("... Could not import spacepy")
pass
except ImportError as e:
logpygen += "MagPy initiation ImportError: NASA cdf not available.\n"
logpygen += "... if you want to use NASA CDF format support please install a current version.\n"
badimports.append(e)
if logpygen == '':
logpygen = "OK"
else:
logger.info(logpygen)
logger.info("Missing packages:")
for item in badimports:
logger.info(item)
logger.info("Moving on anyway...")
### Some Python3/2 compatibility code
### taken from http://www.rfk.id.au/blog/entry/preparing-pyenchant-for-python-3/
try:
unicode = unicode
# 'unicode' exists, must be Python 2
str = str
unicode = unicode
bytes = str
basestring = basestring
except NameError:
# 'unicode' is undefined, must be Python 3
str = str
unicode = str
bytes = bytes
basestring = (str,bytes)
# Storing function - http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods#edit2155350
# by <NAME>
# Used here to pickle baseline functions from header and store it in a cdf key.
# Not really a transparent method but working nicely. Underlying functional parameters to reconstruct the fit
# are stored as well but would require a link to the absolute data.
def _pickle_method(method):
func_name = method.__func__.__name__
obj = method.__self__
cls = method.__self__.__class__
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)
# ----------------------------------------------------------------------------
# Part 2: Define Dictionaries
# ----------------------------------------------------------------------------
# Keys available in DataStream Object:
KEYLIST = [ 'time', # Timestamp (date2num object)
'x', # X or I component of magnetic field (float)
'y', # Y or D component of magnetic field (float)
'z', # Z component of magnetic field (float)
'f', # Magnetic field strength (float)
't1', # Temperature variable (e.g. ambient temp) (float)
't2', # Secondary temperature variable (e.g. sensor temp) (float)
'var1', # Extra variable #1 (float)
'var2', # Extra variable #2 (float)
'var3', # Extra variable #3 (float)
'var4', # Extra variable #4 (float)
'var5', # Extra variable #5 (float)
'dx', # Errors in X (float)
'dy', # Errors in Y (float)
'dz', # Errors in Z (float)
'df', # Errors in F (float)
'str1', # Extra string variable #1 (str)
'str2', # Extra string variable #2 (str)
'str3', # Extra string variable #3 (str)
'str4', # Extra string variable #4 (str)
'flag', # Variable for flags. (str='0000000000000000-')
'comment', # Space for comments on flags (str)
'typ', # Type of data (str='xyzf')
'sectime' # Secondary time variable (date2num)
]
NUMKEYLIST = KEYLIST[1:16]
# Empty key values at initiation of stream:
KEYINITDICT = {'time':0,'x':float('nan'),'y':float('nan'),'z':float('nan'),'f':float('nan'),
't1':float('nan'),'t2':float('nan'),'var1':float('nan'),'var2':float('nan'),
'var3':float('nan'),'var4':float('nan'),'var5':float('nan'),'dx':float('nan'),
'dy':float('nan'),'dz':float('nan'),'df':float('nan'),'str1':'-','str2':'-',
'str3':'-','str4':'-','flag':'0000000000000000-','comment':'-','typ':'xyzf',
'sectime':float('nan')}
FLAGKEYLIST = KEYLIST[:16]
# KEYLIST[:8] # only primary values with time
# KEYLIST[1:8] # only primary values without time
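# Illustration: KEYLIST fixes the column order of DataStream.ndarray, so the
# column behind a key can be addressed by its index, e.g.
#   xcol = stream.ndarray[KEYLIST.index('x')]
# which is essentially what DataStream._get_column('x') does further below.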
# Formats supported by MagPy read function:
PYMAG_SUPPORTED_FORMATS = {
'IAGA':['rw','IAGA 2002 text format'],
'WDC':['rw','World Data Centre format'],
'IMF':['rw', 'Intermagnet Format'],
'IAF':['rw', 'Intermagnet archive Format'],
'BLV':['rw','Baseline format Intermagnet'],
'IYFV':['rw','Yearly mean format Intermagnet'],
'DKA':['rw', 'K value format Intermagnet'],
'DIDD':['rw','Output format from MinGeo DIDD'],
'GSM19':['r', 'Output format from GSM19 magnetometer'],
'COVJSON':['rw', 'Coverage JSON'],
'JSON':['rw', 'JavaScript Object Notation'],
'LEMIHF':['r', 'LEMI text format data'],
'LEMIBIN':['r','Current LEMI binary data format'],
'LEMIBIN1':['r','Deprecated LEMI binary format at WIC'],
'OPT':['r', 'Optical hourly data from WIK'],
'PMAG1':['r','Deprecated ELSEC from WIK'],
'PMAG2':['r', 'Current ELSEC from WIK'],
'GDASA1':['r', 'GDAS binary format'],
'GDASB1':['r', 'GDAS text format'],
'RMRCS':['r', 'RCS data output from Richards perl scripts'],
'RCS':['r', 'RCS raw output'],
'METEO':['r', 'Winklbauer METEO files'],
'NEIC':['r', 'WGET data from USGS - NEIC'],
'LNM':['r', 'Thies Laser-Disdrometer'],
'IWT':['r', 'IWT Tiltmeter data'],
'LIPPGRAV':['r', 'Lippmann Tiltmeter data'],
'GRAVSG':['r', 'GWR TSF data'],
'CR800':['r', 'CR800 datalogger'],
'IONO':['r', 'IM806 Ionometer'],
'RADON':['r', 'single channel analyser gamma data'],
'USBLOG':['r', 'USB temperature logger'],
#'SERSIN':['r', '?'],
#'SERMUL':['r', '?'],
'PYSTR':['rw', 'MagPy full ascii'],
'AUTODIF':['r', 'Deprecated - AutoDIF output data'],
'AUTODIF_FREAD':['r', 'Deprecated - Special format for AutoDIF read-in'],
'PYBIN':['r', 'MagPy own binary format'],
'PYASCII':['rw', 'MagPy basic ASCII'],
'POS1TXT':['r', 'POS-1 text format output data'],
'POS1':['r', 'POS-1 binary output at WIC'],
'PMB':['r', 'POS pmb file'],
'QSPIN':['r', 'QSPIN ascii output'],
#'PYNC':['r', 'MagPy NetCDF variant (too be developed)'],
#'DTU1':['r', 'ASCII Data from the DTUs FGE systems'],
#'BDV1':['r', 'Budkov GDAS data variant'],
'GFZTMP':['r', 'GeoForschungsZentrum ascii format'],
'GFZKP':['r', 'GeoForschungsZentrum KP-Index format'],
'PHA':['r', 'Potentially Hazardous Asteroids (PHAs) from the International Astronomical Unions Minor Planet Center, (json, incomplete)'],
'PREDSTORM':['r','PREDSTORM space weather prediction data format'],
'CSV':['rw','comma-separated CSV data'],
'IMAGCDF':['rw','Intermagnet CDF Format'],
'PYCDF':['rw', 'MagPy CDF variant'],
'NOAAACE':['r', 'NOAA ACE satellite data format'],
'NETCDF':['r', 'NetCDF4 format, NOAA DSCOVR satellite data archive format'],
'LATEX':['w','LateX data'],
'CS':['r','Cesium G823'],
#'SFDMI':['r', 'San Fernando variometer'],
#'SFGSM':['r', 'San Fernando GSM90'],
'UNKOWN':['-','Unknown']
}
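# Sketch: the first entry of each value encodes read/write support, so a quick
# capability lookup could be
#   readable = [fmt for fmt, (rw, desc) in PYMAG_SUPPORTED_FORMATS.items() if 'r' in rw]
#   writable = [fmt for fmt, (rw, desc) in PYMAG_SUPPORTED_FORMATS.items() if 'w' in rw]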
"""
PYMAG_SUPPORTED_FORMATS = {
'IAGA':'rw', # IAGA 2002 text format
'WDC':'rw', # World Data Centre format
'IMF':'rw', # Intermagnet Format
'IAF':'rw', # Intermagnet archive Format
'IMAGCDF', # Intermagnet CDF Format
'BLV', # Baseline format Intermagnet
'IYFV', # Yearly mean format Intermagnet
'DKA', # K value format Intermagnet
'DIDD', # Output format from DIDD
'GSM19', # Output format from GSM19 magnetometer
'COVJSON', # Coverage JavaScript Object Notation
'JSON', # JavaScript Object Notation
'LEMIHF', # LEMI text format data
'LEMIBIN', # Current LEMI binary data format at WIC
'LEMIBIN1', # Deprecated LEMI binary format at WIC
'OPT', # Optical hourly data from WIK
'PMAG1', # Deprecated ELSEC from WIK
'PMAG2', # Current ELSEC from WIK
'GDASA1', # ?
'GDASB1', # ?
'RMRCS', # RCS data output from Richards perl scripts
'RCS', # RCS data output from Richards perl scripts
'METEO', # RCS data output in METEO files
'NEIC', # WGET data from USGS - NEIC
'LNM', # LaserNiederschlagsMonitor files
'IWT', # Tiltmeter data files at cobs
'LIPPGRAV', # Lippmann Tiltmeter data files at cobs
'CR800', # Data from the CR800 datalogger
'IONO', # Data from IM806 Ionometer
'RADON', # ?
'USBLOG', # ?
'SERSIN', # ?
'SERMUL', # ?
'PYSTR', # MagPy full ascii
'AUTODIF', # AutoDIF ouput data
'AUTODIF_FREAD',# Special format for AutoDIF read-in
'PYCDF', # MagPy CDF variant
'PYBIN', # MagPy own format
'PYASCII', # MagPy basic ASCII
'POS1TXT', # POS-1 text format output data
'POS1', # POS-1 binary output at WIC
'PMB', # POS pmb output
'QSPIN', # QSpin output
'PYNC', # MagPy NetCDF variant (too be developed)
'DTU1', # ASCII Data from the DTU's FGE systems
'SFDMI', # ?
'SFGSM', # ?
'BDV1', # ?
'GFZKP', # GeoForschungsZentrum KP-Index format
'NOAAACE', # NOAA ACE satellite data format
'PREDSTORM' # PREDSTORM space weather prediction data format
'CSV', # comma-separated CSV data with isoformat date in first column
'LATEX', # LateX data
'CS', # ?
'UNKOWN' # 'Unknown'?
}
"""
# ----------------------------------------------------------------------------
# Part 3: Example files for easy access and tests
# ----------------------------------------------------------------------------
from pkg_resources import resource_filename
example1 = resource_filename('magpy', 'examples/example1.zip') #Zip compressed IAGA02
example2 = resource_filename('magpy', 'examples/example2.cdf') #MagPy CDF with F
example3 = resource_filename('magpy', 'examples/example3.txt') #PyStr Baseline
example4 = resource_filename('magpy', 'examples/example4.cdf') #MagPy CDF
example5 = resource_filename('magpy', 'examples/example5.sec') #Imag CDF
example6a = resource_filename('magpy', 'examples/example6a.txt') #DI file
example6b = resource_filename('magpy', 'examples/example6b.txt') #DI file
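# Sketch (assumes MagPy's module-level read() entry point, defined further down
# in this file): the bundled example files can be loaded directly for testing,
# e.g.
#   data = read(example1)   # zip-compressed IAGA02 minute data
#   print(data.length())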
# ----------------------------------------------------------------------------
# Part 4: Main classes -- DataStream, LineStruct and
# PyMagLog (To be removed)
# ----------------------------------------------------------------------------
class DataStream(object):
"""
Creates a list-like object from input files / URL data.
Data is organized in columns; keys are the column identifiers
(key in keys: see KEYLIST).
A note on headers:
ALWAYS INITIATE STREAM WITH >>> stream = DataStream([],{}).
All available methods:
----------------------------
- stream.ext(self, columnstructure): # new version of extend function for column operations
- stream.add(self, datlst):
- stream.clear_header(self):
- stream.extend(self,datlst,header):
- stream.union(self,column):
- stream.findtime(self,time):
- stream._find_t_limits(self):
- stream._print_key_headers(self):
- stream._get_key_headers(self,**kwargs):
- stream.sorting(self):
- stream._get_line(self, key, value):
- stream._remove_lines(self, key, value):
- stream._remove_columns(self, keys):
- stream._get_column(self, key):
- stream._put_column(self, column, key, **kwargs):
- stream._move_column(self, key, put2key):
- stream._clear_column(self, key):
- stream._reduce_stream(self, pointlimit=100000):
- stream._aic(self, signal, k, debugmode=None):
- stream._get_k(self, **kwargs):
- stream._get_k_float(self, value, **kwargs):
- stream._get_max(self, key, returntime=False):
- stream._get_min(self, key, returntime=False):
- stream._gf(self, t, tau):
- stream._hf(self, p, x):
- stream._residual_func(self, func, y):
- stream._tau(self, period):
- stream._convertstream(self, coordinate, **kwargs):
- stream._det_trange(self, period):
- stream._is_number(self, s):
- stream._normalize(self, column):
- stream._testtime(self, time):
- stream._drop_nans(self, key):
- stream.aic_calc(self, key, **kwargs):
- stream.baseline(self, absolutestream, **kwargs):
- stream.bindetector(self,key,text=None,**kwargs):
- stream.calc_f(self, **kwargs):
- stream.cut(self,length,kind=0,order=0):
- stream.dailymeans(self):
- stream.date_offset(self, offset):
- stream.delta_f(self, **kwargs):
- stream.dict2stream(self,dictkey='DataBaseValues')
- stream.differentiate(self, **kwargs):
- stream.eventlogger(self, key, values, compare=None, stringvalues=None, addcomment=None, debugmode=None):
- stream.extract(self, key, value, compare=None, debugmode=None):
- stream.extrapolate(self, start, end):
- stream.filter(self, **kwargs):
- stream.fit(self, keys, **kwargs):
- stream.flag_outlier(self, **kwargs):
- stream.flag_stream(self, key, flag, comment, startdate, enddate=None, samplingrate):
- stream.func2stream(self,function,**kwargs):
- stream.func_add(self,function,**kwargs):
- stream.func_subtract(self,function,**kwargs):
- stream.get_gaps(self, **kwargs):
- stream.get_sampling_period(self):
- stream.samplingrate(self, **kwargs):
- stream.integrate(self, **kwargs):
- stream.interpol(self, keys, **kwargs):
- stream.k_fmi(self, **kwargs):
- stream.mean(self, key, **kwargs):
- stream.multiply(self, factors):
- stream.offset(self, offsets):
- stream.randomdrop(self, percentage=None, fixed_indicies=None):
- stream.remove(self, starttime=starttime, endtime=endtime):
- stream.remove_flagged(self, **kwargs):
- stream.resample(self, keys, **kwargs):
- stream.rotation(self,**kwargs):
- stream.scale_correction(self, keys, scales, **kwargs):
- stream.smooth(self, keys, **kwargs):
- stream.steadyrise(self, key, timewindow, **kwargs):
- stream.stream2dict(self,dictkey='DataBaseValues')
- stream.stream2flaglist(self, userange=True, flagnumber=None, keystoflag=None, sensorid=None, comment=None)
- stream.trim(self, starttime=None, endtime=None, newway=False):
- stream.variometercorrection(self, variopath, thedate, **kwargs):
- stream.write(self, filepath, **kwargs):
Application methods:
----------------------------
- stream.aic_calc(key) -- returns stream (with !var2! filled with aic values)
- stream.baseline() -- calculates baseline correction for input stream (datastream)
- stream.dailymeans() -- for DI stream - obtains variometer corrected means of basevalues
- stream.date_offset() -- Corrects the time column of the selected stream by the given offset
- stream.delta_f() -- Calculates the difference of x+y+z to f
- stream.differentiate() -- returns stream (with !dx!,!dy!,!dz!,!df! filled by derivatives)
- stream.extrapolate() -- read absolute stream and extrapolate the data
- stream.fit(keys) -- returns function
- stream.filter() -- returns stream (changes sampling_period; in case of fmi ...)
- stream.find_offset(stream_a, stream_b) -- Finds offset of two data streams. (Not optimised.)
- stream.flag_stream() -- Add flags to specific times or time ranges
- stream.func2stream() -- Combine stream and function (add, subtract, etc)
- stream.func_add() -- Add a function to the selected values of the data stream
- stream.func_subtract() -- Subtract a function from the selected values of the data stream
- stream.get_gaps() -- Takes the dominant sample frequency and fills non-existing time steps
- stream.get_sampling_period() -- returns the dominant sampling frequency in unit ! days !
- stream.integrate() -- returns stream (integrated vals at !dx!,!dy!,!dz!,!df!)
- stream.interpol(keys) -- returns function
- stream.k_fmi() -- Calculating k values following the fmi approach
- stream.linestruct2ndarray() -- converts linestruct data to ndarray. should be avoided
- stream.mean() -- Calculates mean values for the specified key; NaNs are taken into account
- stream.offset() -- Apply constant offsets to elements of the datastream
- stream.plot() -- plot keys from stream
- stream.powerspectrum() -- Calculating the power spectrum following the numpy fft example
- stream.remove_flagged() -- returns stream (removes data from stream according to flags)
- stream.resample(period) -- Resample stream to given sampling period.
- stream.rotation() -- Rotation matrix for rotating x,y,z to new coordinate system xs,ys,zs
- stream.selectkeys(keys) -- ndarray: remove all data except for provided keys (and flag/comment)
- stream.smooth(key) -- smooth the data using a window with requested size
- stream.spectrogram() -- Creates a spectrogram plot of selected keys
- stream.stream2flaglist() -- make flaglist out of stream
- stream.trim() -- returns stream within new time frame
- stream.use_sectime() -- Swap between primary and secondary time (if sectime is available)
- stream.variometercorrection() -- Obtain average DI values at certain timestep(s)
- stream.write() -- Writing Stream to a file
Supporting INTERNAL methods:
----------------------------
A. Standard functions and overrides for list like objects
- self.clear_header(self) -- Clears headers
- self.extend(self,datlst,header) -- Extends stream object
- self.sorting(self) -- Sorts object
B. Internal Methods I: Line & column functions
- self._get_column(key) -- returns a numpy array of selected columns from Stream
- self._put_column(key) -- adds a column to a Stream
- self._move_column(key, put2key) -- moves one column to another key
- self._clear_column(key) -- clears a column to a Stream
- self._get_line(self, key, value) -- returns a LineStruct element corresponding to the first occurrence of value within the selected key
- self._reduce_stream(self) -- Reduces stream below a certain limit.
- self._remove_lines(self, key, value) -- removes lines with value within the selected key
- self.findtime(self,time) -- returns index and line for which time equals self.time
B. Internal Methods II: Data manipulation functions
- self._aic(self, signal, k, debugmode=None) -- returns float -- determines the Akaike Information Criterion for a specific index k
- self._get_k(self, **kwargs) -- Calculates the k value according to the Bartels scale
- self._get_k_float(self, value, **kwargs) -- Like _get_k, but for testing single values and not full stream keys (used in filtered function)
- self._gf(self, t, tau): -- Gauss function
- self._hf(self, p, x) -- Harmonic function
- self._residual_func(self, func, y) -- residual of the harmonic function
- self._tau(self, period) -- low pass filter with -3db point at period in sec (e.g. 120 sec)
B. Internal Methods III: General utility & NaN handlers
- self._convertstream(self, coordinate, **kwargs) -- Convert coordinates of x,y,z columns in stream
- self._det_trange(self, period) -- starting with coefficients above 1%
- self._find_t_limits(self) -- return times of first and last stream data points
- self._testtime(time) -- returns datetime object
- self._get_min(key) -- returns float
- self._get_max(key) -- returns float
- self._normalize(column) -- returns list,float,float -- normalizes selected column to range 0,1
- nan_helper(self, y) -- Helper to handle indices and logical indices of NaNs
- self._print_key_headers(self) -- Prints keys in datastream with variable and unit.
- self._get_key_headers(self) -- Returns keys in datastream.
- self._drop_nans(self, key) -- Helper to drop lines with NaNs in any of the selected keys.
- self._is_number(self, s) -- ?
Supporting EXTERNAL methods:
----------------------------
Useful functions:
- array2stream -- returns a data stream -- converts a list of arrays to a datastream
- linestruct2ndarray -- returns a data ndarray -- converts a old linestruct format
- denormalize -- returns list -- (column,startvalue,endvalue) denormalizes selected column from range 0,1 to sv,ev
- find_nearest(array, value) -- find point in array closest to value
- maskNAN(column) -- Tests for NAN values in array and usually masks them
- nearestPow2(x) -- Find power of two nearest to x
*********************************************************************
Standard function description format:
DEFINITION:
Description of function purpose and usage.
PARAMETERS:
Variables:
- variable: (type) Description.
Kwargs:
- variable: (type) Description.
RETURNS:
- variable: (type) Description.
EXAMPLE:
>>> alldata = mergeStreams(pos_stream, lemi_stream, keys=['<KEY>'])
APPLICATION:
Code for simple application.
*********************************************************************
Standard file description format:
Path: *path* (magpy.acquisition.pos1protocol)
Part of package: *package* (acquisition)
Type: *type* (type of file/package)
PURPOSE:
Description...
CONTAINS:
*ThisClass: (Class)
What is this class for?
thisFunction: (Func) Description
DEPENDENCIES:
List all non-standard packages required for file.
+ paths of all MagPy package dependencies.
CALLED BY:
Path to magpy packages that call this part, e.g. magpy.bin.acquisition
*********************************************************************
"""
KEYLIST = [ 'time', # Timestamp (date2num object)
'x', # X or I component of magnetic field (float)
'y', # Y or D component of magnetic field (float)
'z', # Z component of magnetic field (float)
'f', # Magnetic field strength (float)
't1', # Temperature variable (e.g. ambient temp) (float)
't2', # Secondary temperature variable (e.g. sensor temp) (float)
'var1', # Extra variable #1 (float)
'var2', # Extra variable #2 (float)
'var3', # Extra variable #3 (float)
'var4', # Extra variable #4 (float)
'var5', # Extra variable #5 (float)
'dx', # Errors in X (float)
'dy', # Errors in Y (float)
'dz', # Errors in Z (float)
'df', # Errors in F (float)
'str1', # Extra string variable #1 (str)
'str2', # Extra string variable #2 (str)
'str3', # Extra string variable #3 (str)
'str4', # Extra string variable #4 (str)
'flag', # Variable for flags. (str='0000000000000000-')
'comment', # Space for comments on flags (str)
'typ', # Type of data (str='xyzf')
'sectime' # Secondary time variable (date2num)
]
NUMKEYLIST = KEYLIST[1:16]
def __init__(self, container=None, header={},ndarray=None):
if container is None:
container = []
self.container = container
if ndarray is None:
ndarray = np.array([np.asarray([]) for elem in KEYLIST])
self.ndarray = ndarray ## Test this! -> for better memory efficiency
#if header is None:
# header = {'Test':'Well, it works'}
#header = {}
self.header = header
#for key in KEYLIST:
# setattr(self,key,np.asarray([]))
#self.header = {'Test':'Well, it works'}
self.progress = 0
# ------------------------------------------------------------------------
# A. Standard functions and overrides for list like objects
# ------------------------------------------------------------------------
def ext(self, columnstructure): # new version of extend function for column operations
"""
the extend and add functions must be replaced in case of
speed optimization
"""
for key in KEYLIST:
self.container.key = np.append(self.container.key, columnstructure.key, 1)
def add(self, datlst):
#try:
assert isinstance(self.container, (list, tuple))
self.container.append(datlst)
#except:
# print list(self.container).append(datlst)
def length(self):
#try:
if len(self.ndarray[0]) > 0:
ll = [len(elem) for elem in self.ndarray]
return ll
else:
try: ## might fail if LineStruct is empty (no time)
if len(self) == 1 and np.isnan(self[0].time):
return [0]
else:
return [len(self)]
except:
return [0]
def replace(self, datlst):
# Replace in stream
# - replace value with existing data
# Method was used by K calc - replaced by internal method there
newself = DataStream()
assert isinstance(self.container, (list, tuple))
ti = list(self._get_column('time'))
try:
ind = ti.index(datlst.time)
except ValueError:
self = self.add(datlst)
return self
except:
return self
li = [elem for elem in self]
del li[ind]
del ti[ind]
li.append(datlst)
return DataStream(li,self.header)
def copy(self):
"""
DESCRIPTION:
method for copying content of a stream to a new stream
APPLICATION:
for non-destructive methods
"""
#print self.container
#assert isinstance(self.container, (list, tuple))
co = DataStream()
#co.header = self.header
newheader = {}
for el in self.header:
newheader[el] = self.header[el]
array = [[] for el in KEYLIST]
if len(self.ndarray[0])> 0:
for ind, key in enumerate(KEYLIST):
liste = []
for val in self.ndarray[ind]: ## This is necessary to really copy the content
liste.append(val)
array[ind] = np.asarray(liste)
co.container = [LineStruct()]
else:
for el in self:
li = LineStruct()
for key in KEYLIST:
if key == 'time':
li.time = el.time
else:
#exec('li.'+key+' = el.'+key)
elkey = getattr(el,key)
setattr(li, key, elkey)
co.add(li)
return DataStream(co.container,newheader,np.asarray(array, dtype=object))
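# Usage sketch: take a deep copy before destructive operations so the original
# stream stays untouched, e.g.
#   backup = stream.copy()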
def __str__(self):
return str(self.container)
def __repr__(self):
return str(self.container)
def __getitem__(self, var):
try:
if var in NUMKEYLIST:
return self.ndarray[self.KEYLIST.index(var)].astype(np.float64)
else:
return self.ndarray[self.KEYLIST.index(var)]
except:
return self.container.__getitem__(var)
def __setitem__(self, var, value):
self.ndarray[self.KEYLIST.index(var)] = value
def __len__(self):
return len(self.container)
def clear_header(self):
"""
Remove header information
"""
self.header = {}
def extend(self,datlst,header,ndarray):
array = [[] for key in KEYLIST]
self.container.extend(datlst)
self.header = header
# Some initial check if any data set except timecolumn is contained
datalength = len(ndarray)
#t1 = datetime.utcnow()
if pyvers and pyvers == 2:
ch1 = '-'.encode('utf-8') # not working with py3
ch2 = ''.encode('utf-8')
else:
ch1 = '-'
ch2 = ''
try:
test = []
for col in ndarray:
col = np.array(list(col))
#print (np.array(list(col)).dtype)
if col.dtype in ['float64','float32','int32','int64']:
try:
x = np.asarray(col)[~np.isnan(col)]
except: # fallback 1 -> should not needed any more
#print ("Fallback1")
x = np.asarray([elem for elem in col if not np.isnan(elem)])
else:
#y = np.asarray(col)[col!='-']
#x = np.asarray(y)[y!='']
y = np.asarray(col)[col!=ch1]
x = np.asarray(y)[y!=ch2]
test.append(x)
test = np.asarray(test,dtype=object)
except:
# print ("Fallback -- pretty slowly")
#print ("Fallback2")
test = [[elem for elem in col if not elem in [ch1,ch2]] for col in ndarray]
#t2 = datetime.utcnow()
#print (t2-t1)
emptycnt = [len(el) for el in test if len(el) > 0]
if self.ndarray.size == 0:
self.ndarray = ndarray
elif len(emptycnt) == 1:
print("Tyring to extend with empty data set")
#self.ndarray = np.asarray((list(self.ndarray)).extend(list(ndarray)))
else:
for idx,elem in enumerate(self.ndarray):
if len(ndarray[idx]) > 0:
if len(self.ndarray[idx]) > 0 and len(self.ndarray[0]) > 0:
array[idx] = np.append(self.ndarray[idx], ndarray[idx]).astype(object)
#array[idx] = np.append(self.ndarray[idx], ndarray[idx],1).astype(object)
elif len(self.ndarray[0]) > 0: # only time axis present so far but no data within this elem
fill = ['-']
key = KEYLIST[idx]
if key in NUMKEYLIST or key=='sectime':
fill = [float('nan')]
nullvals = np.asarray(fill * len(self.ndarray[0]))
#array[idx] = np.append(nullvals, ndarray[idx],1).astype(object)
array[idx] = np.append(nullvals, ndarray[idx]).astype(object)
else:
array[idx] = ndarray[idx].astype(object)
self.ndarray = np.asarray(array, dtype=object)
def union(self,column):
seen = set()
seen_add = seen.add
return [ x for x in column if not (x in seen or seen_add(x))]
def removeduplicates(self):
"""
DESCRIPTION:
Identify duplicate time stamps and remove the duplicated lines.
Lines with the first occurrence are kept.
"""
# get duplicates in time column
def list_duplicates(seq):
seen = set()
seen_add = seen.add
return [idx for idx,item in enumerate(seq) if item in seen or seen_add(item)]
if not len(self.ndarray[0]) > 0:
print ("removeduplicates: works only with ndarrays")
return
duplicateindicies = list_duplicates(self.ndarray[0])
array = [[] for key in KEYLIST]
for idx, elem in enumerate(self.ndarray):
if len(elem) > 0:
newelem = np.delete(elem, duplicateindicies)
array[idx] = newelem
return DataStream(self, self.header, np.asarray(array,dtype=object))
def start(self, dateformt=None):
st,et = self._find_t_limits()
return st
def end(self, dateformt=None):
st,et = self._find_t_limits()
return et
def findtime(self,time,**kwargs):
"""
DEFINITION:
Find a line within the container which contains the selected time step
or the first line following this timestep (since 0.3.99 using mode 'argmax')
VARIABLES:
startidx (int) index to start search with (speeding up)
endidx (int) index to end search with (speeding up)
mode (string) define search mode (fastest would be 'argmax')
RETURNS:
The index position of the line and the line itself
"""
startidx = kwargs.get('startidx')
endidx = kwargs.get('endidx')
mode = kwargs.get('mode')
#try:
# from bisect import bisect
#except ImportError:
# print("Import error")
st = date2num(self._testtime(time))
if len(self.ndarray[0]) > 0:
if startidx and endidx:
ticol = self.ndarray[0][startidx:endidx]
elif startidx:
ticol = self.ndarray[0][startidx:]
elif endidx:
ticol = self.ndarray[0][:endidx]
else:
ticol = self.ndarray[0]
try:
if mode =='argmax':
## much faster since 0.3.99 (used in flag_stream)
indexes = [np.argmax(ticol>=st)]
else:
## the following method is used until 0.3.98
indexes = [i for i,x in enumerate(ticol) if x == st] ### FASTER
# Other methods
# #############
#indexes = [i for i,x in enumerate(ticol) if np.allclose(x,st,rtol=1e-14,atol=1e-17)] # if the two time equal within about 0.7 milliseconds
#indexes = [bisect(ticol, st)] ## SELECTS ONLY INDEX WHERE VALUE SHOULD BE inserted
#indexes = [ticol.index(st)]
#print("findtime", indexes)
if not len(indexes) == 0:
if startidx:
retindex = indexes[0] + startidx
else:
retindex = indexes[0]
#print("Findtime index:",retindex)
return retindex, LineStruct()
else:
return 0, []
#return list(self.ndarray[0]).index(st), LineStruct()
except:
logger.warning("findtime: Didn't find selected time - returning 0")
return 0, []
for index, line in enumerate(self):
if line.time == st:
return index, line
logger.warning("findtime: Didn't find selected time - returning 0")
return 0, []
def _find_t_limits(self):
"""
DEFINITION:
Find start and end times in stream.
RETURNS:
Two datetime objects, start and end.
"""
if len(self.ndarray[0]) > 0:
t_start = num2date(np.min(self.ndarray[0].astype(float))).replace(tzinfo=None)
t_end = num2date(np.max(self.ndarray[0].astype(float))).replace(tzinfo=None)
else:
try: # old type
t_start = num2date(self[0].time).replace(tzinfo=None)
t_end = num2date(self[-1].time).replace(tzinfo=None)
except: # empty
t_start,t_end = None,None
return t_start, t_end
def _print_key_headers(self):
print("%10s : %22s : %28s" % ("MAGPY KEY", "VARIABLE", "UNIT"))
for key in FLAGKEYLIST[1:]:
try:
header = self.header['col-'+key]
except:
header = None
try:
unit = self.header['unit-col-'+key]
except:
unit = None
print("%10s : %22s : %28s" % (key, header, unit))
def _get_key_headers(self,**kwargs):
"""
DEFINITION:
get a list of existing numerical keys in stream.
PARAMETERS:
kwargs:
- limit: (int) limit the length of the list
- numerical: (bool) if True, select only numerical keys
RETURNS:
- keylist: (list) a list like ['x','y','z']
EXAMPLE:
>>> data_stream._get_key_headers(limit=1)
"""
limit = kwargs.get('limit')
numerical = kwargs.get('numerical')
if numerical:
TESTLIST = FLAGKEYLIST
else:
TESTLIST = KEYLIST
keylist = []
"""
for key in FLAGKEYLIST[1:]:
try:
header = self.header['col-'+key]
try:
unit = self.header['unit-col-'+key]
except:
unit = None
keylist.append(key)
except:
header = None
"""
if not len(keylist) > 0: # e.g. Testing ndarray
for ind,elem in enumerate(self.ndarray): # use the long way
if len(elem) > 0 and ind < len(TESTLIST):
if not TESTLIST[ind] == 'time':
keylist.append(TESTLIST[ind])
if not len(keylist) > 0: # e.g. header col-? does not contain any info
#for key in FLAGKEYLIST[1:]: # use the long way
for key in TESTLIST[1:]: # use the long way
col = self._get_column(key)
if len(col) > 0:
#if not len(col) == 1 and not ( # maybe add something to prevent reading empty LineStructs)
if len(col) == 1:
if col[0] in ['-',float(nan),'']:
pass
else:
keylist.append(key)
if limit and len(keylist) > limit:
keylist = keylist[:limit]
return keylist
def _get_key_names(self):
"""
DESCRIPTION:
get the variable names for each key
APPLICATION:
keydict = self._get_key_names()
"""
keydict = {}
for key in KEYLIST:
kname = self.header.get('col-'+key)
keydict[kname] = key
return keydict
def dropempty(self):
"""
DESCRIPTION:
Drop empty arrays from ndarray and store their positions
"""
if not len(self.ndarray[0]) > 0:
return self.ndarray, np.asarray([])
newndarray = []
indexarray = []
for ind,elem in enumerate(self.ndarray):
if len(elem) > 0:
newndarray.append(np.asarray(elem).astype(object))
indexarray.append(ind)
keylist = [el for ind,el in enumerate(KEYLIST) if ind in indexarray]
return np.asarray(newndarray), keylist
def fillempty(self, ndarray, keylist):
"""
DESCRIPTION:
Fills empty arrays into ndarray at all positions of KEYLIST not provided in keylist
"""
if not len(ndarray[0]) > 0:
return self
if len(ndarray) == len(KEYLIST): # nothing to fill if all key positions are already present
return ndarray
lst = list(ndarray)
for i,key in enumerate(KEYLIST):
if not key in keylist:
lst.insert(i,[])
newndarray = np.asarray(lst,dtype=object)
return newndarray
def sorting(self):
"""
Sorting data according to time (maybe generalize that to some key)
"""
try: # old LineStruct part
liste = sorted(self.container, key=lambda tmp: tmp.time)
except:
pass
if len(self.ndarray[0]) > 0:
self.ndarray, keylst = self.dropempty()
#self.ndarray = self.ndarray[:, np.argsort(self.ndarray[0])] # does not work if some rows have a different length)
ind = np.argsort(self.ndarray[0])
for i,el in enumerate(self.ndarray):
if len(el) == len(ind):
self.ndarray[i] = el[ind]
else:
#print("Sorting: key %s has the wrong length - replacing row with NaNs" % KEYLIST[i])
logger.warning("Sorting: key %s has the wrong length - replacing row with NaNs" % KEYLIST[i])
logger.warning("len(t-axis)=%d len(%s)=%d" % (len(self.ndarray[0]), KEYLIST[i], len(self.ndarray[i])))
self.ndarray[i] = np.empty(len(self.ndarray[0])) * np.nan
self.ndarray = self.fillempty(self.ndarray,keylst)
for idx,el in enumerate(self.ndarray):
self.ndarray[idx] = np.asarray(self.ndarray[idx]).astype(object)
else:
self.ndarray = self.ndarray
return DataStream(liste, self.header, self.ndarray)
# ------------------------------------------------------------------------
# B. Internal Methods: Line & column functions
# ------------------------------------------------------------------------
def _get_line(self, key, value):
"""
returns a LineStruct element corresponding to the first occurrence of value within the selected key
e.g.
st = st._get_line('time',734555.3442) will return the line with time 7...
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
lines = [elem for elem in self if eval('elem.'+key) == value]
return lines[0]
def _take_columns(self, keys):
"""
DEFINITION:
extract selected columns of the given keys (Old LineStruct format - deprecated)
"""
resultstream = DataStream()
for elem in self:
line = LineStruct()
line.time = elem.time
resultstream.add(line)
resultstream.header = {}
for key in keys:
if not key in KEYLIST:
pass
elif not key == 'time':
col = self._get_column(key)
#print key, len(col)
try:
resultstream.header['col-'+key] = self.header['col-'+key]
except:
pass
try:
resultstream.header['unit-col-'+key] = self.header['unit-col-'+key]
except:
pass
resultstream = resultstream._put_column(col,key)
return resultstream
def _remove_lines(self, key, value):
"""
removes lines with the given value within the selected key
e.g.
st = st._remove_lines('time',734555.3442) will return the stream without the line(s) at time 7...
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
lst = [elem for elem in self if not eval('elem.'+key) == value]
return DataStream(lst, self.header)
def _get_column(self, key):
"""
Returns a numpy array of selected column from Stream
Example:
columnx = datastream._get_column('x')
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
# Speeded up this technique:
ind = KEYLIST.index(key)
if len(self.ndarray[0]) > 0:
try:
col = self[key]
except:
col = self.ndarray[ind]
return col
# Check for initialization value
#testval = self[0][ind]
# if testval == KEYINITDICT[key] or isnan(testval):
# return np.asarray([])
try:
col = np.asarray([row[ind] for row in self])
#get the first ten elements and test whether nan is there -- why ??
"""
try: # in case of string....
novalfound = True
for ele in col[:10]:
if not isnan(ele):
novalfound = False
if novalfound:
return np.asarray([])
except:
return col
"""
return col
except:
return np.asarray([])
def _put_column(self, column, key, **kwargs):
"""
DEFINITION:
adds a column to a Stream
PARAMETERS:
column: (array) single list with data with equal length as stream
key: (key) key to which the data is written
Kwargs:
columnname: (string) define a name
columnunit: (string) define a unit
RETURNS:
- DataStream object
EXAMPLE:
>>> stream = stream._put_column(res, 't2', columnname='Rain',columnunit='mm in 1h')
"""
#init = kwargs.get('init')
#if init>0:
# for i in range init:
# self.add(float('NaN'))
columnname = kwargs.get('columnname')
columnunit = kwargs.get('columnunit')
if not key in KEYLIST:
raise ValueError("Column key not valid")
if len(self.ndarray[0]) > 0:
ind = KEYLIST.index(key)
self.ndarray[ind] = np.asarray(column)
else:
if not len(column) == len(self):
raise ValueError("Column length does not fit Datastream")
for idx, elem in enumerate(self):
setattr(elem, key, column[idx])
if not columnname:
try: # TODO correct that
if eval('self.header["col-%s"]' % key) == '':
exec('self.header["col-%s"] = "%s"' % (key, key))
except:
pass
else:
exec('self.header["col-%s"] = "%s"' % (key, columnname))
if not columnunit:
try: # TODO correct that
if eval('self.header["unit-col-%s"]' % key) == '':
exec('self.header["unit-col-%s"] = "arb"' % (key))
except:
pass
else:
exec('self.header["unit-col-%s"] = "%s"' % (key, columnunit))
return self
def _move_column(self, key, put2key):
'''
DEFINITION:
Move column of key "key" to key "put2key".
Simples.
PARAMETERS:
Variables:
- key: (str) Key to be moved.
- put2key: (str) Key for 'key' to be moved to.
RETURNS:
- stream: (DataStream) DataStream object.
EXAMPLE:
>>> data_stream._move_column('f', 'var1')
'''
if not key in KEYLIST:
logger.error("_move_column: Column key %s not valid!" % key)
if key == 'time':
logger.error("_move_column: Cannot move time column!")
if not put2key in KEYLIST:
logger.error("_move_column: Column key %s (to move %s to) is not valid!" % (put2key,key))
if len(self.ndarray[0]) > 0:
col = self._get_column(key)
self =self._put_column(col,put2key)
return self
try:
for i, elem in enumerate(self):
exec('elem.'+put2key+' = '+'elem.'+key)
if key in NUMKEYLIST:
setattr(elem, key, float("NaN"))
#exec('elem.'+key+' = float("NaN")')
else:
setattr(elem, key, "-")
#exec('elem.'+key+' = "-"')
try:
exec('self.header["col-%s"] = self.header["col-%s"]' % (put2key, key))
exec('self.header["unit-col-%s"] = self.header["unit-col-%s"]' % (put2key, key))
exec('self.header["col-%s"] = None' % (key))
exec('self.header["unit-col-%s"] = None' % (key))
except:
logger.error("_move_column: Error updating headers.")
logger.info("_move_column: Column %s moved to column %s." % (key, put2key))
except:
logger.error("_move_column: It's an error.")
return self
def _drop_column(self,key):
"""
remove a column of a Stream
"""
ind = KEYLIST.index(key)
if len(self.ndarray[0]) > 0:
try:
self.ndarray[ind] = np.asarray([])
except:
# Some arrays don't allow that (shape error, e.g. PYSTRING) -> then use this
array = [np.asarray(el) if idx is not ind else np.asarray([]) for idx,el in enumerate(self.ndarray)]
self.ndarray = np.asarray(array,dtype=object)
colkey = "col-%s" % key
colunitkey = "unit-col-%s" % key
try:
self.header.pop(colkey, None)
self.header.pop(colunitkey, None)
except:
print("_drop_column: Error while dropping header info")
else:
print("No data available or LineStruct type (not supported)")
return self
def _clear_column(self, key):
"""
remove a column to a Stream
"""
#init = kwargs.get('init')
#if init>0:
# for i in range init:
# self.add(float('NaN'))
if not key in KEYLIST:
raise ValueError("Column key not valid")
for idx, elem in enumerate(self):
if key in NUMKEYLIST:
setattr(elem, key, float("NaN"))
#exec('elem.'+key+' = float("NaN")')
else:
setattr(elem, key, "-")
#exec('elem.'+key+' = "-"')
return self
def _reduce_stream(self, pointlimit=100000):
"""
DEFINITION:
Reduces the size of the stream by periodically picking points, intended for
plotting methods, to save memory when plotting large data sets.
Does NOT filter or smooth!
This function purely removes data points (rows) in a
periodic fashion until size is <100000 data points.
(Point limit can also be defined.)
PARAMETERS:
Kwargs:
- pointlimit: (int) Max number of points to include in stream. Default is 100000.
RETURNS:
- DataStream: (DataStream) New stream reduced to below pointlimit.
EXAMPLE:
>>> lessdata = ten_Hz_data._reduce_stream(pointlimit=500000)
"""
size = len(self)
div = size/pointlimit
divisor = math.ceil(div)
count = 0.
lst = []
if divisor > 1.:
for elem in self:
if count%divisor == 0.:
lst.append(elem)
count += 1.
else:
logger.warning("_reduce_stream: Stream size (%s) is already below pointlimit (%s)." % (size,pointlimit))
return self
logger.info("_reduce_stream: Stream size reduced from %s to %s points." % (size,len(lst)))
return DataStream(lst, self.header)
def _remove_nancolumns(self):
"""
DEFINITION:
Remove any columns solely filled with NaN values
APPLICATION:
called by plot methods in mpplot
RETURNS:
- DataStream: (DataStream) New stream with all-NaN columns removed.
"""
array = [[] for key in KEYLIST]
if len(self.ndarray[0]) > 0:
for idx, elem in enumerate(self.ndarray):
if len(self.ndarray[idx]) > 0 and KEYLIST[idx] in NUMKEYLIST:
lst = list(self.ndarray[idx])
#print KEYLIST[idx],lst[0]
if lst[1:] == lst[:-1] and np.isnan(float(lst[0])):
array[idx] = np.asarray([])
else:
array[idx] = self.ndarray[idx]
else:
array[idx] = self.ndarray[idx]
else:
pass
return DataStream(self,self.header,np.asarray(array,dtype=object))
# ------------------------------------------------------------------------
# B. Internal Methods: Data manipulation functions
# ------------------------------------------------------------------------
def _aic(self, signal, k, debugmode=None):
try:
aicval = (k-1)* np.log(np.var(signal[:k]))+(len(signal)-k-1)*np.log(np.var(signal[k:]))
except:
if debugmode:
logger.debug('_AIC: could not evaluate AIC at index position %i' % (k))
pass
return aicval
def harmfit(self,nt, val, fitdegree):
# method for harmonic fit according to Phil McFadden's fortran program
"""
DEFINITION:
Method for harmonic fit according to Phil McFadden's fortran program
Used by k-value determination
PARAMETERS:
Kwargs:
- nt: (list) Normalized time array.
- val: (list) Value list.
- fitdegree: (int) harmonic degree, default is 5.
RETURNS:
- newval: (array) an array with fitted values of length(val).
EXAMPLE:
>>> f_fit = self.harmfit(nt,val, 5)
"""
N = len(nt)
coeff = (val[-1]-val[0]) /(nt[-1]-nt[0])
newval = [elem-coeff*(nt[i]-nt[0]) for i, elem in enumerate(val)]
ReVal = []
ImVal = []
for h in range(0,fitdegree):
ReVal.append(newval[0])
ImVal.append(0.0)
angle = -h*(2.0*np.pi/N)
for i in range(1,len(newval)):
si = np.sin(i*angle)
co = np.cos(i*angle)
ReVal[h] = ReVal[h] + newval[i]*co
ImVal[h] = ImVal[h] + newval[i]*si
#print "Parameter:", len(newval)
#print len(ReVal), ReVal
angle = 2.0*np.pi*(float(N-1)/float(N))/(nt[-1]-nt[0])
harmval = []
for i,elem in enumerate(newval):
harmval.append(ReVal[0])
angle2 = (nt[i]-nt[0])*angle
for h in range(1,fitdegree):
si = np.sin(h*angle2)
co = np.cos(h*angle2)
harmval[i] = harmval[i]+(2.0*(ReVal[h]*co-ImVal[h]*si))
harmval[i] = harmval[i]/float(N)+coeff*(nt[i]-nt[0])
return np.asarray(harmval)
def _get_max(self, key, returntime=False):
if not key in KEYLIST[:16]:
raise ValueError("Column key not valid")
key_ind = KEYLIST.index(key)
t_ind = KEYLIST.index('time')
if len(self.ndarray[0]) > 0:
result = np.nanmax(self.ndarray[key_ind].astype(float))
ind = np.nanargmax(self.ndarray[key_ind].astype(float))
tresult = self.ndarray[t_ind][ind]
else:
elem = max(self, key=lambda tmp: eval('tmp.'+key))
result = eval('elem.'+key)
tresult = elem.time
if returntime:
return result, tresult
else:
return result
def _get_min(self, key, returntime=False):
if not key in KEYLIST[:16]:
raise ValueError("Column key not valid")
key_ind = KEYLIST.index(key)
t_ind = KEYLIST.index('time')
if len(self.ndarray[0]) > 0:
result = np.nanmin(self.ndarray[key_ind].astype(float))
ind = np.nanargmin(self.ndarray[key_ind].astype(float))
tresult = self.ndarray[t_ind][ind]
else:
elem = min(self, key=lambda tmp: eval('tmp.'+key))
result = eval('elem.'+key)
tresult = elem.time
if returntime:
return result, tresult
else:
return result
def _get_variance(self, key):
if not key in KEYLIST[:16]:
raise ValueError("Column key not valid")
key_ind = KEYLIST.index(key)
if len(self.ndarray[0]) > 0:
result = np.nanvar(self.ndarray[key_ind].astype(float))
return result
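    # Illustrative use of the helpers above (sketch, assuming a stream with a filled 'x' column):
    #     xmax, tmax = stream._get_max('x', returntime=True)
    #     xmin = stream._get_min('x')
    #     xvar = stream._get_variance('x')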
def amplitude(self,key):
"""
DESCRIPTION:
calculates maximum-minimum difference of the keys timeseries
REQUIRES:
_get_column()
RETURNS:
            float: difference between maximum and minimum value in time range
        APPLICATION:
amp = stream.amplitude('x')
"""
ts = self._get_column(key).astype(float)
ts = ts[~np.isnan(ts)]
maxts = np.max(ts)
mints = np.min(ts)
return maxts-mints
def _gf(self, t, tau):
"""
Gauss function
"""
return np.exp(-((t/tau)*(t/tau))/2)
    def _hf(self, p, x):
        """
        Harmonic function
        """
        hf = p[0]*np.cos(2*np.pi/p[1]*x+p[2]) + p[3]*x + p[4] # Target function
        return hf
def _residual_func(self, func, y):
"""
residual of the harmonic function
"""
return y - func
def _tau(self, period, fac=0.83255461):
"""
low pass filter with -3db point at period in sec (e.g. 120 sec)
1. convert period from seconds to days as used in daytime
2. return tau (in unit "day")
- The value of 0.83255461 is obtained for -3db (see IAGA Guide)
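        EXAMPLE (illustrative):
            >>> tau = stream._tau(120)   # -3db at 120 sec -> approx. 1.84e-4 days (about 16 sec)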
"""
per = period/(3600*24)
return fac*per/(2*np.pi)
# ------------------------------------------------------------------------
# B. Internal Methods: General utility & NaN handlers
# ------------------------------------------------------------------------
def _convertstream(self, coordinate, **kwargs):
"""
DESCRIPTION:
Convert coordinates of x,y,z columns in other
coordinate system:
- xyz2hdz
- xyz2idf
- hdz2xyz
- idf2xyz
            Helper method which calls the transformation routines
APPLICATION:
used by k_fmi, variocorrection
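        EXAMPLE (illustrative):
            >>> hdzstream = stream._convertstream('xyz2hdz')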
"""
ext = ''
if len(self.ndarray[4]) > 0:
ext = 'F'
if len(self.ndarray[KEYLIST.index('df')]) > 0:
ext = 'G'
if len(self.ndarray[0]) > 0:
if coordinate == 'xyz2hdz':
self = self.xyz2hdz()
self.header['DataComponents'] = 'HDZ'+ext
elif coordinate == 'xyz2idf':
self = self.xyz2idf()
self.header['DataComponents'] = 'IDF'+ext
elif coordinate == 'hdz2xyz':
self = self.hdz2xyz()
self.header['DataComponents'] = 'XYZ'+ext
elif coordinate == 'idf2xyz':
self = self.idf2xyz()
self.header['DataComponents'] = 'XYZ'+ext
elif coordinate == 'idf2hdz':
self = self.idf2xyz()
self = self.xyz2hdz()
self.header['DataComponents'] = 'HDZ'+ext
elif coordinate == 'hdz2idf':
self = self.hdz2xyz()
self = self.xyz2idf()
self.header['DataComponents'] = 'IDF'+ext
else:
print("_convertstream: unkown coordinate transform")
return self
keep_header = kwargs.get('keep_header')
outstream = DataStream()
for elem in self:
row=LineStruct()
exec('row = elem.'+coordinate+'(unit="deg")')
row.typ = ''.join((list(coordinate))[4:])+'f'
outstream.add(row)
if not keep_header:
outstream.header['col-x'] = (list(coordinate))[4]
outstream.header['col-y'] = (list(coordinate))[5]
outstream.header['col-z'] = (list(coordinate))[6]
if (list(coordinate))[4] in ['i','d']:
outstream.header['unit-col-x'] = 'deg'
else:
outstream.header['unit-col-x'] = 'nT'
if (list(coordinate))[5] in ['i','d']:
outstream.header['unit-col-y'] = 'deg'
else:
outstream.header['unit-col-y'] = 'nT'
if (list(coordinate))[6] in ['i','d']:
outstream.header['unit-col-z'] = 'deg'
else:
outstream.header['unit-col-z'] = 'nT'
return DataStream(outstream,outstream.header)
def _delete(self,index):
"""
DESCRIPTION:
            Helper method to delete all values at a specific index or range of indices
from the ndarray
        APPLICATION:
            Used by k_fmi with individual indices
"""
for i,array in enumerate(self.ndarray):
if isinstance( index, (int) ): # removed long (not necessary for python3, error in win)
if len(array) > index:
self.ndarray[i] = np.delete(self.ndarray[i],index)
else:
self.ndarray[i] = np.delete(self.ndarray[i],index)
return self
def _append(self,stream):
"""
DESCRIPTION:
Helper method to append values from another stream to
a ndarray. Append only to columns already filled in self.
        APPLICATION:
Used by k_fmi
"""
        for i,array in enumerate(self.ndarray):
if len(array) > 0:
self.ndarray[i] = np.append(self.ndarray[i],stream.ndarray[i])
return self
def _det_trange(self, period):
"""
starting with coefficients above 1%
is now returning a timedelta object
"""
return np.sqrt(-np.log(0.01)*2)*self._tau(period)
def _is_number(self, s):
"""
Test whether s is a number
"""
if str(s) in ['','None',None]:
return False
try:
float(s)
return True
except ValueError:
return False
def _normalize(self, column):
"""
normalizes the given column to range [0:1]
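        EXAMPLE (sketch):
            >>> normcol, minval, maxval = stream._normalize(stream._get_column('x'))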
"""
normcol = []
column = column.astype(float)
maxval = np.max(column)
minval = np.min(column)
for elem in column:
normcol.append((elem-minval)/(maxval-minval))
return normcol, minval, maxval
def _testtime(self, time):
"""
Check the date/time input and returns a datetime object if valid:
! Use UTC times !
- accepted are the following inputs:
1) absolute time: as provided by date2num
2) strings: 2011-11-22 or 2011-11-22T11:11:00
            3) datetime objects from datetime.datetime, e.g. datetime(2011,11,22,11,11,00)
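        EXAMPLE (sketch):
            >>> stream._testtime('2011-11-22T11:11:00')
            datetime.datetime(2011, 11, 22, 11, 11)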
"""
if isinstance(time, float) or isinstance(time, int):
try:
timeobj = num2date(time).replace(tzinfo=None)
except:
raise TypeError
elif isinstance(time, str): # test for str only in Python 3 should be basestring for 2.x
try:
timeobj = datetime.strptime(time,"%Y-%m-%d")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%dT%H:%M:%S")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%d %H:%M:%S.%f")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%dT%H:%M:%S.%f")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%d %H:%M:%S")
except:
try:
# Not happy with that but necessary to deal
# with old 1000000 micro second bug
timearray = time.split('.')
if timearray[1] == '1000000':
timeobj = datetime.strptime(timearray[0],"%Y-%m-%d %H:%M:%S")+timedelta(seconds=1)
else:
                                    # This would be wrong but always leads to a TypeError anyway
timeobj = datetime.strptime(timearray[0],"%Y-%m-%d %H:%M:%S")
except:
try:
timeobj = num2date(float(time)).replace(tzinfo=None)
except:
raise TypeError
elif not isinstance(time, datetime):
raise TypeError
else:
timeobj = time
return timeobj
def _drop_nans(self, key):
"""
DEFINITION:
Helper to drop all lines when NaNs or INFs are found within the selected key
RETURNS:
            - DataStream: (DataStream object) a new data stream object without the identified lines.
EXAMPLE:
>>> newstream = stream._drop_nans('x')
APPLICATION:
used for plotting and fitting of data
"""
array = [np.asarray([]) for elem in KEYLIST]
if len(self.ndarray[0]) > 0 and key in NUMKEYLIST:
ind = KEYLIST.index(key)
#indicieslst = [i for i,el in enumerate(self.ndarray[ind].astype(float)) if np.isnan(el) or np.isinf(el)]
ar = np.asarray(self.ndarray[ind]).astype(float)
indicieslst = []
for i,el in enumerate(ar):
if np.isnan(el) or np.isinf(el):
indicieslst.append(i)
searchlist = ['time']
searchlist.extend(NUMKEYLIST)
for index,tkey in enumerate(searchlist):
if len(self.ndarray[index])>0: # Time column !!! -> index+1
array[index] = np.delete(self.ndarray[index], indicieslst)
#elif len(self.ndarray[index+1])>0:
# array[index+1] = self.ndarray[index+1]
newst = [LineStruct()]
else:
newst = [elem for elem in self if not isnan(eval('elem.'+key)) and not isinf(eval('elem.'+key))]
return DataStream(newst,self.header,np.asarray(array,dtype=object))
def _select_keys(self, keys):
"""
DESCRIPTION
Non-destructive method to select provided keys from Data stream.
APPLICATION:
streamxy = streamyxzf._select_keys(['x','y'])
"""
result = self.copy()
try:
if not len(keys) > 0:
return self
except:
return self
"""
print ("sel", keys)
if not 'time' in keys:
keys.append('time')
print ("sel", keys)
"""
ndarray = [[] for key in KEYLIST]
ndarray = np.asarray([np.asarray(elem) if KEYLIST[idx] in keys or KEYLIST[idx] == 'time' else np.asarray([]) for idx,elem in enumerate(result.ndarray)])
return DataStream([LineStruct()],result.header,ndarray)
def _select_timerange(self, starttime=None, endtime=None, maxidx=-1):
"""
DESCRIPTION
Non-destructive method to select a certain time range from a stream.
Similar to trim, leaving the original stream unchanged however.
APPLICATION:
Used by write
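        EXAMPLE (illustrative):
            >>> ndarray = stream._select_timerange(starttime='2015-02-01', endtime='2015-02-02')
            >>> daystream = DataStream([LineStruct()], stream.header, ndarray)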
"""
ndarray = [[] for key in KEYLIST]
# Use a different technique
# copy all data to array and then delete everything below and above
#t1 = datetime.utcnow()
#ndarray = self.ndarray
startindices = []
endindices = []
if starttime:
starttime = self._testtime(starttime)
if self.ndarray[0].size > 0: # time column present
if maxidx > 0:
idx = (np.abs(self.ndarray[0][:maxidx]-date2num(starttime))).argmin()
else:
idx = (np.abs(self.ndarray[0]-date2num(starttime))).argmin()
# Trim should start at point >= starttime, so check:
if self.ndarray[0][idx] < date2num(starttime):
idx += 1
startindices = list(range(0,idx))
if endtime:
endtime = self._testtime(endtime)
if self.ndarray[0].size > 0: # time column present
#print "select timerange", maxidx
if maxidx > 0: # truncate the ndarray
#print maxidx
#tr = self.ndarray[0][:maxidx].astype(float)
                    idx = 1 + (np.abs(self.ndarray[0][:maxidx].astype(float)-date2num(endtime))).argmin() # get the nearest index to endtime and add 1 (to get lengths correctly)
                else:
                    idx = 1 + (np.abs(self.ndarray[0].astype(float)-date2num(endtime))).argmin() # get the nearest index to endtime and add 1 (to get lengths correctly)
if idx >= len(self.ndarray[0]): ## prevent too large idx values
idx = len(self.ndarray[0]) # - 1
try: # using try so that this test is passed in case of idx == len(self.ndarray)
endnum = date2num(endtime)
#print ("Value now", idx, self.ndarray[0][idx], date2num(endtime))
if self.ndarray[0][idx] > endnum and self.ndarray[0][idx-1] < endnum:
# case 1: value at idx is larger, value at idx-1 is smaller -> use idx
pass
elif self.ndarray[0][idx] == endnum:
# case 2: value at idx is endnum -> use idx
pass
elif not self.ndarray[0][idx] <= endnum:
                        # case 3: value at idx is still above endnum -> step back to idx-1
idx -= 1
#print ("Value now b", idx, self.ndarray[0][idx], date2num(endtime))
#if not self.ndarray[0][idx] <= date2num(endtime):
# # Make sure that last value is either identical to endtime (if existing or one index larger)
# # This is important as from this index on, data is removed
# idx -= 1
# print ("Value now", idx, self.ndarray[0][idx], date2num(endtime))
# print ("Value now", idx, self.ndarray[0][idx+1], date2num(endtime))
except:
pass
endindices = list(range(idx,len(self.ndarray[0])))
indices = startindices + endindices
#t2 = datetime.utcnow()
#print "_select_timerange - getting t range needed:", t2-t1
if len(startindices) > 0:
st = startindices[-1]+1
else:
st = 0
if len(endindices) > 0:
ed = endindices[0]
else:
ed = len(self.ndarray[0])
for i in range(len(self.ndarray)):
ndarray[i] = self.ndarray[i][st:ed] ## This is the correct length
#t3 = datetime.utcnow()
#print "_select_timerange - deleting :", t3-t2
return np.asarray(ndarray,dtype=object)
# ------------------------------------------------------------------------
# C. Application methods
# (in alphabetical order)
# ------------------------------------------------------------------------
def aic_calc(self, key, **kwargs):
"""
DEFINITION:
Picking storm onsets using the Akaike Information Criterion (AIC) picker
- extract one dimensional array from DataStream (e.g. H) -> signal
- take the first k values of the signal and calculates variance and log
- plus the rest of the signal (variance and log)
NOTE: Best results come from evaluating two data series - one with original
data, one of same data with AIC timerange offset by timerange/2 to cover
any signals that may occur at the points between evaluations.
PARAMETERS:
Variables:
- key: (str) Key to check. Needs to be an element of KEYLIST.
Kwargs:
- timerange: (timedelta object) defines the length of the time window
examined by the aic iteration. (default: timedelta(hours=1).)
- aic2key: (str) defines the key of the column where to save the aic values
(default = var2).
- aicmin2key: (str) defines the key of the column where to save the aic minimum val
(default: key = var1.)
- aicminstack: (bool) if true, aicmin values are added to previously present column values.
RETURNS:
- self: (DataStream object) Stream with results in default var1 + var2 keys.
EXAMPLE:
>>> stream = stream.aic_calc('x',timerange=timedelta(hours=0.5))
APPLICATION:
from magpy.stream import read
stream = read(datapath)
stream = stream.aic_calc('x',timerange=timedelta(hours=0.5))
stream = stream.differentiate(keys=['var2'],put2keys=['var3'])
stream_filt = stream.extract('var1',200,'>')
            stream_new = stream_filt.eventlogger('var3',[30,40,60],'>',addcomment=True)
stream = mergeStreams(stream,stream_new,key='comment')
"""
timerange = kwargs.get('timerange')
aic2key = kwargs.get('aic2key')
aicmin2key = kwargs.get('aicmin2key')
aicminstack = kwargs.get('aicminstack')
if not timerange:
timerange = timedelta(hours=1)
if not aic2key:
aic2key = 'var2'
if not aicmin2key:
aicmin2key = 'var1'
t = self._get_column('time')
signal = self._get_column(key)
#Clear the projected results column
array = []
aic2ind = KEYLIST.index(aic2key)
self = self._clear_column(aic2key)
if len(self.ndarray[0]) > 0.:
            self.ndarray[aic2ind] = np.empty(len(self.ndarray[0]))
            self.ndarray[aic2ind][:] = np.nan
# get sampling interval for normalization - need seconds data to test that
sp = self.get_sampling_period()*24*60
        # correct approach
iprev = 0
iend = 0
while iend < len(t)-1:
istart = iprev
ta, iend = find_nearest(np.asarray(t), date2num(num2date(t[istart]).replace(tzinfo=None) + timerange))
if iend == istart:
iend += 60 # approx for minute files and 1 hour timedelta (used when no data available in time range) should be valid for any other time range as well
else:
currsequence = signal[istart:iend]
aicarray = []
for idx, el in enumerate(currsequence):
if idx > 1 and idx < len(currsequence):
# CALCULATE AIC
aicval = self._aic(currsequence, idx)/timerange.seconds*3600 # *sp Normalize to sampling rate and timerange
if len(self.ndarray[0]) > 0:
self.ndarray[aic2ind][idx+istart] = aicval
else:
exec('self[idx+istart].'+ aic2key +' = aicval')
if not isnan(aicval):
aicarray.append(aicval)
# store start value - aic: is a measure for the significance of information change
#if idx == 2:
# aicstart = aicval
#self[idx+istart].var5 = aicstart-aicval
maxaic = np.max(aicarray)
# determine the relative amplitude as well
cnt = 0
for idx, el in enumerate(currsequence):
if idx > 1 and idx < len(currsequence):
# TODO: this does not yet work with ndarrays
try:
if aicminstack:
if not eval('isnan(self[idx+istart].'+aicmin2key+')'):
exec('self[idx+istart].'+ aicmin2key +' += (-aicarray[cnt] + maxaic)')
else:
exec('self[idx+istart].'+ aicmin2key +' = (-aicarray[cnt] + maxaic)')
else:
exec('self[idx+istart].'+ aicmin2key +' = (-aicarray[cnt] + maxaic)')
exec('self[idx+istart].'+ aicmin2key +' = maxaic')
cnt = cnt+1
except:
msg = "number of counts does not fit usually because of nans"
iprev = iend
self.header['col-var2'] = 'aic'
return self
def baseline(self, absolutedata, **kwargs):
"""
DESCRIPTION:
calculates baseline correction for input stream (datastream)
Uses available baseline values from the provided absolute file
Special cases:
1) Absolute data covers the full time range of the stream:
-> Absolute data is extrapolated by duplicating the last and first entry at "extradays" offset
-> desired function is calculated
            2) No Absolute data for the end of the stream:
                -> like 1: Absolute data is extrapolated by duplicating the last entry at "extradays" offset or end of stream
                -> an info message is created; if the time difference exceeds the "extradays" arg, a warning will be sent
            3) No Absolute data for the beginning of the stream:
                -> like 2: Absolute data is extrapolated by duplicating the first entry at "extradays" offset or beginning of stream
                -> an info message is created; if the time difference exceeds the "extradays" arg, a warning will be sent
VARIABLES:
required:
            absolutedata    (DataStream) containing DI data - usually obtained by absolutes.absoluteAnalysis()
keywords:
            plotbaseline    (bool/string) will plot a baselineplot (if a valid path is provided
                            to file, otherwise to screen) - requires mpplot
            extradays       (int) days to which the absolutedata is extended prior to and after start and endtime
##plotfilename (string) if plotbaseline is selected, the outputplot is send to this file
fitfunc (string) see fit
fitdegree (int) see fit
knotstep (int) see fit
keys (list) keys which contain the basevalues (default) is ['dx','dy','dz']
APPLICATION:
func = data.baseline(didata,knotstep=0.1,plotbaseline=True)
# fixed time range
func = data.baseline(didata,startabs='2015-02-01',endabs='2015-08-24',extradays=0)
OR:
funclist = []
funclist.append(rawdata.baseline(basevalues, extradays=0, fitfunc='poly',
fitdegree=1,startabs='2009-01-01',endabs='2009-03-22'))
funclist.append(rawdata.baseline(basevalues, extradays=0, fitfunc='poly',
fitdegree=1,startabs='2009-03-22',endabs='2009-06-27'))
funclist.append(rawdata.baseline(basevalues, extradays=0, fitfunc='spline',
knotstep=0.2,startabs='2009-06-27',endabs='2010-02-01'))
stabilitytest (bool)
"""
keys = kwargs.get('keys')
fitfunc = kwargs.get('fitfunc')
fitdegree = kwargs.get('fitdegree')
knotstep = kwargs.get('knotstep')
extradays = kwargs.get('extradays',15)
plotbaseline = kwargs.get('plotbaseline')
plotfilename = kwargs.get('plotfilename')
startabs = kwargs.get('startabs')
endabs = kwargs.get('endabs')
orgstartabs = None
orgendabs = None
#if not extradays:
# extradays = 15
if not fitfunc:
fitfunc = self.header.get('DataAbsFunc')
if not fitfunc:
fitfunc = 'spline'
if not fitdegree:
fitdegree = self.header.get('DataAbsDegree')
if not fitdegree:
fitdegree = 5
if not knotstep:
knotstep = self.header.get('DataAbsKnots')
if not knotstep:
knotstep = 0.3
if not keys:
            keys = ['dx','dy','dz']
if len(self.ndarray[0]) > 0:
ndtype = True
starttime = np.min(self.ndarray[0])
endtime = np.max(self.ndarray[0])
else:
starttime = self[0].time
endtime = self[-1].time
fixstart,fixend = False,False
if startabs:
startabs = date2num(self._testtime(startabs))
orgstartabs = startabs
fixstart = True
if endabs:
endabs = date2num(self._testtime(endabs))
orgendabs = endabs
fixend = True
pierlong = absolutedata.header.get('DataAcquisitionLongitude','')
pierlat = absolutedata.header.get('DataAcquisitionLatitude','')
pierel = absolutedata.header.get('DataElevation','')
pierlocref = absolutedata.header.get('DataAcquisitionReference','')
pierelref = absolutedata.header.get('DataElevationRef','')
#self.header['DataAbsFunc'] = fitfunc
#self.header['DataAbsDegree'] = fitdegree
#self.header['DataAbsKnots'] = knotstep
#self.header['DataAbsDate'] = datetime.strftime(datetime.utcnow(),'%Y-%m-%d %H:%M:%S')
usestepinbetween = False # for better extrapolation
logger.info(' --- Start baseline-correction at %s' % str(datetime.now()))
absolutestream = absolutedata.copy()
#print("Baseline", absolutestream.length())
absolutestream = absolutestream.remove_flagged()
#print("Baseline", absolutestream.length())
#print("Baseline", absolutestream.ndarray[0])
absndtype = False
if len(absolutestream.ndarray[0]) > 0:
#print ("HERE1: adopting time range absolutes - before {} {}".format(startabs, endabs))
absolutestream.ndarray[0] = absolutestream.ndarray[0].astype(float)
absndtype = True
if not np.min(absolutestream.ndarray[0]) < endtime:
logger.warning("Baseline: Last measurement prior to beginning of absolute measurements ")
abst = absolutestream.ndarray[0]
if not startabs or startabs < np.min(absolutestream.ndarray[0]):
startabs = np.min(absolutestream.ndarray[0])
if not endabs or endabs > np.max(absolutestream.ndarray[0]):
endabs = np.max(absolutestream.ndarray[0])
else:
# 1) test whether absolutes are in the selected absolute data stream
if absolutestream[0].time == 0 or absolutestream[0].time == float('nan'):
raise ValueError ("Baseline: Input stream needs to contain absolute data ")
# 2) check whether enddate is within abs time range or larger:
if not absolutestream[0].time-1 < endtime:
logger.warning("Baseline: Last measurement prior to beginning of absolute measurements ")
abst = absolutestream._get_column('time')
startabs = absolutestream[0].time
endabs = absolutestream[-1].time
        # Initialize orgstartabs and orgendabs if not yet provided: orgabs values will be added to DataAbsInfo
if not orgstartabs:
orgstartabs = startabs
if not orgendabs:
orgendabs = endabs
#print ("HERE2a: Time range absolutes - {} {} {} {}".format(startabs, endabs, num2date(startabs), num2date(endabs)))
#print ("HERE2b: Time range datastream - {} {}".format(starttime, endtime))
# 3) check time ranges of stream and absolute values:
if startabs > starttime:
#print ('HERE2c: First absolute value measured after beginning of stream')
#logger.warning('Baseline: First absolute value measured after beginning of stream - duplicating first abs value at beginning of time series')
#if fixstart:
#
#absolutestream.add(absolutestream[0])
#absolutestream[-1].time = starttime
#absolutestream.sorting()
logger.info('Baseline: %d days without absolutes at the beginning of the stream' % int(np.floor(np.min(abst)-starttime)))
if endabs < endtime:
logger.info("Baseline: Last absolute measurement before end of stream - extrapolating baseline")
if num2date(endabs).replace(tzinfo=None) + timedelta(days=extradays) < num2date(endtime).replace(tzinfo=None):
usestepinbetween = True
if not fixend:
logger.warning("Baseline: Well... thats an adventurous extrapolation, but as you wish...")
starttime = num2date(starttime).replace(tzinfo=None)
endtime = num2date(endtime).replace(tzinfo=None)
        # 4) get standard time range of one year and extradays at start and end
# test whether absstream covers this time range including extradays
# ###########
# get boundaries
# ###########
extrapolate = False
# upper
if fixend:
#absolutestream = absolutestream.trim(endtime=endabs) # should I trim here already - leon ??
# time range long enough
baseendtime = endabs+extradays
if baseendtime < orgendabs:
baseendtime = orgendabs
extrapolate = True
else:
baseendtime = date2num(endtime+timedelta(days=1))
extrapolate = True
#if endabs >= date2num(endtime)+extradays:
# # time range long enough
# baseendtime = date2num(endtime)+extradays
# lower
if fixstart:
#absolutestream = absolutestream.trim(starttime=startabs) # should I trim here already - leon ??
basestarttime = startabs-extradays
if basestarttime > orgstartabs:
basestarttime = orgstartabs
extrapolate = True
else:
# not long enough
#basestarttime = date2num(starttime)
basestarttime = startabs-extradays
extrapolate = True
if baseendtime - (366.+2*extradays) > startabs:
# time range long enough
basestarttime = baseendtime-(366.+2*extradays)
baseendtime = num2date(baseendtime).replace(tzinfo=None)
basestarttime = num2date(basestarttime).replace(tzinfo=None)
#print ("HERE3a: basestart and end", basestarttime, baseendtime)
# Don't use trim here
#bas = absolutestream.trim(starttime=basestarttime,endtime=baseendtime)
basarray = absolutestream._select_timerange(starttime=basestarttime,endtime=baseendtime)
bas = DataStream([LineStruct()],absolutestream.header,basarray)
#print ("HERE3b: length of selected absolutes: ", bas.length()[0])
if extrapolate: # and not extradays == 0:
bas = bas.extrapolate(basestarttime,baseendtime)
        #keys = ['dx','dy','dz']
try:
print ("Fitting Baseline between: {a} and {b}".format(a=str(num2date(np.min(bas.ndarray[0]))),b=str(num2date(np.max(bas.ndarray[0])))))
print (keys, fitfunc, fitdegree, knotstep)
logger.info("Fitting Baseline between: {a} and {b}".format(a=str(num2date(np.min(bas.ndarray[0]))),b=str(num2date(np.max(bas.ndarray[0])))))
#print ("Baseline", bas.length(), keys)
#for elem in bas.ndarray:
# print elem
func = bas.fit(keys,fitfunc=fitfunc,fitdegree=fitdegree,knotstep=knotstep)
except:
print ("Baseline: Error when determining fit - Enough data point to satisfy fit complexity?")
logger.error("Baseline: Error when determining fit - Not enough data point to satisfy fit complexity? N = {}".format(bas.length()))
return None
#if len(keys) == 3:
# ix = KEYLIST.index(keys[0])
# iy = KEYLIST.index(keys[1])
# iz = KEYLIST.index(keys[2])
# get the function in some readable equation
#self.header['DataAbsDataT'] = bas.ndarray[0],bas.ndarray[ix],bas.ndarray[iy],bas.ndarray[iz]]
if plotbaseline:
#check whether plotbaseline is valid path or bool
try:
try:
import magpy.mpplot as mp
except ImportError:
print ("baseline: Could not load package mpplot")
if plotfilename:
mp.plot(bas,variables=['dx','dy','dz'],padding = [5,0.005,5], symbollist = ['o','o','o'],function=func,plottitle='Absolute data',outfile=plotfilename)
else:
mp.plot(bas,variables=['dx','dy','dz'],padding = [5,0.005,5], symbollist = ['o','o','o'],function=func,plottitle='Absolute data')
except:
print("using the internal plotting routine requires mpplot to be imported as mp")
keystr = '_'.join(keys)
pierlong = absolutedata.header.get('DataAcquisitionLongitude','')
pierlat = absolutedata.header.get('DataAcquisitionLatitude','')
pierel = absolutedata.header.get('DataElevation','')
pierlocref = absolutedata.header.get('DataLocationReference','')
pierelref = absolutedata.header.get('DataElevationRef','')
if not pierlong == '' and not pierlat == '' and not pierel == '':
absinfostring = '_'.join(map(str,[orgstartabs,orgendabs,extradays,fitfunc,fitdegree,knotstep,keystr,pierlong,pierlat,pierlocref,pierel,pierelref]))
else:
absinfostring = '_'.join(map(str,[orgstartabs,orgendabs,extradays,fitfunc,fitdegree,knotstep,keystr]))
existingabsinfo = self.header.get('DataAbsInfo','').replace(', EPSG',' EPSG').split(',')
if not existingabsinfo[0] == '':
existingabsinfo.append(absinfostring)
else:
existingabsinfo = [absinfostring]
# Get minimum and maximum times out of existing absinfostream
minstarttime=100000000.0
maxendtime=0.0
for el in existingabsinfo:
ele = el.split('_')
mintime = float(ele[0])
maxtime = float(ele[1])
if minstarttime > mintime:
minstarttime = mintime
if maxendtime < maxtime:
maxendtime = maxtime
exabsstring = ','.join(existingabsinfo)
self.header['DataAbsInfo'] = exabsstring # 735582.0_735978.0_0_spline_5_0.3_dx_dy_dz
#print ("HERE5a:", minstarttime, maxendtime, absolutestream.length()[0])
bas2save = absolutestream.trim(starttime=minstarttime,endtime=maxendtime)
tmpdict = bas2save.stream2dict()
#print ("HERE5b:", bas2save.length()[0])
self.header['DataBaseValues'] = tmpdict['DataBaseValues']
# Get column heads of dx,dy and dz
# default is H-base[nT],D-base[deg],Z-base[nT]
basecomp = "HDZ"
try:
basecomp = "{}{}{}".format(absolutestream.header.get('col-dx')[0],absolutestream.header.get('col-dy')[0],absolutestream.header.get('col-dz')[0])
except:
pass
if not basecomp == "HDZ":
print (" -> basevalues correspond to components {}".format(basecomp))
self.header['DataBaseComponents'] = basecomp
#self.header['DataAbsMinTime'] = func[1] #num2date(func[1]).replace(tzinfo=None)
#self.header['DataAbsMaxTime'] = func[2] #num2date(func[2]).replace(tzinfo=None)
#self.header['DataAbsFunctionObject'] = func
logger.info(' --- Finished baseline-correction at %s' % str(datetime.now()))
return func
def stream2dict(self, keys=['dx','dy','dz'], dictkey='DataBaseValues'):
"""
DESCRIPTION:
Method to convert stream contents into a list and assign this to a dictionary.
You can use this method to directly store magnetic basevalues along with
data time series (e.g. using NasaCDF). Multilayer storage as supported by NetCDF
might provide better options to combine both data sets in one file.
PARAMETERS:
stream (DataStream) data containing e.g. basevalues
keys (list of keys) keys which are going to be stored
dictkey (string) name of the dictionaries key
RETURNS:
dict (dictionary) with name dictkey
APPLICATION:
>>> d = absdata.stream2dict(['dx','dy','dz'],'DataBaseValues')
>>> d = neicdata.stream2dict(['f','str3'],'Earthquakes')
"""
if not self.length()[0] > 0:
return {}
if not len(keys) > 0:
return {}
d = {}
keylst = ['time']
keylst.extend(keys)
array,headline,addline = [],[],[]
for key in keylst:
try:
pos = KEYLIST.index(key)
except ValueError:
pos = -1
if pos in range(0,len(KEYLIST)):
headline.append(key)
if not key == 'time':
addline.append(self.header.get('col-'+key))
else:
addline.append(self.header.get('DataID'))
column = self.ndarray[pos]
array.append(column)
rowlst = np.transpose(np.asarray(array)).astype(object)
fulllst = np.insert(rowlst,0,np.asarray(addline).astype(object),axis=0) ##could be used to store column names and id in time column
fulllst = np.insert(fulllst,0,np.asarray(headline).astype(object),axis=0)
d[dictkey] = fulllst
return d
def dict2stream(self,dictkey='DataBaseValues'):
"""
DESCRIPTION:
Method to convert the list stored in stream.header['DataBaseValue']
to an absolute stream.
PARAMETERS:
stream (DataStream) stream with variation data
            dictkey     (string) usually 'DataBaseValues'
RETURNS:
stream (DataStream) containing values of header info
APPLICATION:
>>> absstream = stream.dict2stream(header['DataBaseValues'])
"""
lst = self.header.get(dictkey)
if not type(lst) in (list,tuple,np.ndarray):
print("dict2stream: no list,tuple,array found in provided header key")
return DataStream()
if len(lst) == 0:
print("dict2stream: list is empty")
return DataStream()
array = [[] for el in KEYLIST]
headerinfo = lst[0]
addinfo = lst[1]
data = lst[2:]
#print(headerinfo,addinfo)
collst = np.transpose(np.asarray(data)).astype(object)
#print(collst)
for idx,key in enumerate(headerinfo):
pos = KEYLIST.index(key)
array[pos] = collst[idx]
return DataStream([LineStruct()], {}, np.asarray(array,dtype=object))
def baselineAdvanced(self, absdata, baselist, **kwargs):
"""
DESCRIPTION:
reads stream, didata and baseline list
-> save separate monthly cdf's for each baseline input
-> Filename contains date of baseline jump
RETURNS:
list of header and ndarray -> this is necessary for datastreams
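        EXAMPLE (illustrative sketch; absdata and baselist as described above):
            >>> streamlist = data.baselineAdvanced(absdata, baselist, plotbaseline=True)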
"""
sensid = kwargs.get('sensorid')
plotbaseline = kwargs.get('plotbaseline')
data = self.copy()
# Get start and endtime of stream
ts,te = data._find_t_limits()
# Get start and endtime of di data
tabss,tabse = absdata._find_t_limits()
# Some checks
if tabss > te or tabse < ts:
print ("baselineAdvanced: No DI data for selected stream available -aborting")
return False
if tabss > ts:
print ("baselineAdvanced: DI data does not cover the time range of stream - trimming stream")
data = data.trim(starttime=tabss)
if tabse < te:
print ("baselineAdvanced: DI data does not cover the time range of stream - trimming stream")
data = data.trim(endtime=tabse)
# Getting relevant baseline info
sensid = self.header.get('SensorID','')
if sensid == '':
print ("baselineAdvanced: No SensorID in header info - provide by option sensorid='XXX'")
return False
indlist = [ind for ind, elem in enumerate(baselist[0]) if elem == sensid]
#print "writeBC", indlist
senslist = [[el for idx,el in enumerate(elem) if idx in indlist] for elem in baselist]
#print "writeBC", senslist
#print "writeBC", senslist[1]
if not len(senslist) > 0:
print ("baselineAdvanced: Did not find any valid baseline parameters for selected sensor")
return False
# get index of starttime closest before
beforeinds = [[ind,np.abs(date2num(ts)-elem)] for ind, elem in enumerate(senslist[1]) if elem < date2num(ts)]
#print "writeBC", beforeinds
minl = [el[1] for el in beforeinds]
#print "writeBC minl", minl
startind = beforeinds[minl.index(np.min(minl))][0]
#print "writeBC", startind
vallist = [[el for idx,el in enumerate(elem) if idx == startind] for elem in senslist]
#print vallist
validinds = [ind for ind, elem in enumerate(senslist[1]) if elem >= date2num(ts) and elem <= date2num(te)]
#print "writeBC inds", validinds
vallist2 = [[el for idx,el in enumerate(elem) if idx in validinds] for elem in senslist]
#print vallist2
if len(vallist2[0]) > 0:
resultlist = []
for idx, elem in enumerate(vallist):
addelem = vallist2[idx]
print(elem, addelem)
elem.extend(addelem)
resultlist.append(elem)
else:
resultlist = vallist
print("baselineAdvanced: inds", resultlist)
# Select appropriate time ranges from stream
if not len(resultlist[0]) > 0:
print ("baselineAdvanced: Did not find any valid baseline parameters for selected sensor")
return False
streamlist = []
dictlist = []
resultlist = np.asarray(resultlist)
vals = resultlist.transpose()
for idx, elem in enumerate(vals):
#print "writeBC running", elem
mintime = float(elem[1])
maxtime = float(elem[2])
array = data._select_timerange(starttime=mintime, endtime=maxtime)
stream = DataStream(data,data.header,array)
baselinefunc = stream.baseline(absdata,startabs=mintime,endabs=maxtime, fitfunc=elem[3],fitdegree=int(elem[4]),knotstep=float(elem[5]),plotbaseline=plotbaseline)
#stream = stream.bc()
#exec('stream'+str(idx)+'= DataStream(stream,stream.header,stream.ndarray)')
dicthead = stream.header
#dictlist.append(dicthead.copy()) # Note: append just adds a pointer to content - use copy
#streamlist.append([dicthead.copy(),stream.ndarray])
streamlist.append([DataStream([LineStruct()],dicthead.copy(),stream.ndarray),baselinefunc])
#print "Streamlist", streamlist
#print len(dicthead),dictlist
return streamlist
def bc(self, function=None, ctype=None, alpha=0.0,level='preliminary'):
"""
DEFINITION:
Method to obtain baseline corrected data. By default flagged data is removed
before baseline correction.
Requires DataAbs values in the datastreams header.
The function object is transferred to keys x,y,z, please note that the baseline function
is stored in HDZ format (H:nT, D:0.0000 deg, Z: nT).
By default the bc method requires HDZ oriented variometer data. If XYZ data is provided,
            or any other orientation, please provide rotation angles to transform this data into HDZ.
Example: For XYZ data please add the option alpha=DeclinationAtYourSite in a
float format of 0.00000 deg
PARAMETERS:
function (function object) provide the function directly - not from header
ctype (string) one of 'fff', 'fdf', 'ddf' - denoting nT components 'f' and degree 'd'
alpha/beta (floats) provide rotation angles for the variometer data to be applied
before correction - data is rotated back after correction
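        EXAMPLE (illustrative, assuming DataAbsInfo and DataBaseValues are present in the header):
            >>> bcdata = stream.bc()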
"""
logger.debug("BC: Performing baseline correction: Requires HEZ data.")
logger.debug(" H magnetic North, E magnetic East, Z vertical downwards, all in nT.")
pierdata = False
absinfostring = self.header.get('DataAbsInfo')
absvalues = self.header.get('DataBaseValues')
func = self.header.get('DataAbsFunctionObject')
datatype = self.header.get('DataType')
basecomp = self.header.get('DataBaseComponents')
if datatype == 'BC':
print ("BC: dataset is already baseline corrected - returning")
return self
bcdata = self.copy()
logger.debug("BC: Components of stream: {}".format(self.header.get('DataComponents')))
logger.debug("BC: baseline adoption information: {}".format(absinfostring))
if absinfostring and type(absvalues) in [list,np.ndarray,tuple]:
#print("BC: Found baseline adoption information in meta data - correcting")
absinfostring = absinfostring.replace(', EPSG',' EPSG')
absinfostring = absinfostring.replace(',EPSG',' EPSG')
absinfostring = absinfostring.replace(', epsg',' EPSG')
absinfostring = absinfostring.replace(',epsg',' EPSG')
absinfolist = absinfostring.split(',')
funclist = []
for absinfo in absinfolist:
#print("BC: TODO repeat correction several times and check header info")
# extract baseline data
absstream = bcdata.dict2stream()
#print("BC: abstream length", absstream.length()[0])
parameter = absinfo.split('_')
#print("BC:", parameter, len(parameter))
funckeys = parameter[6:9]
if len(parameter) >= 14:
#extract pier information
pierdata = True
pierlon = float(parameter[9])
pierlat = float(parameter[10])
pierlocref = parameter[11]
pierel = float(parameter[12])
pierelref = parameter[13]
#print("BC", num2date(float(parameter[0])))
#print("BC", num2date(float(parameter[1])))
if not funckeys == ['df']:
func = bcdata.baseline(absstream, startabs=float(parameter[0]), endabs=float(parameter[1]), extradays=int(float(parameter[2])), fitfunc=parameter[3], fitdegree=int(float(parameter[4])), knotstep=float(parameter[5]), keys=funckeys)
if 'dx' in funckeys:
func[0]['fx'] = func[0]['fdx']
func[0]['fy'] = func[0]['fdy']
func[0]['fz'] = func[0]['fdz']
func[0].pop('fdx', None)
func[0].pop('fdy', None)
func[0].pop('fdz', None)
keys = ['x','y','z']
elif 'x' in funckeys:
keys = ['x','y','z']
else:
print("BC: could not interpret BaseLineFunctionObject - returning")
return self
funclist.append(func)
#TODO addbaseline
#if AbsData contain xyz use mode='add'
datacomp = bcdata.header.get('DataComponents','')
if basecomp in ['xyz','XYZ']:
bcdata = bcdata.func2stream(funclist,mode='add',keys=keys)
bcdata.header['col-x'] = 'X'
bcdata.header['unit-col-x'] = 'nT'
bcdata.header['col-y'] = 'Y'
bcdata.header['unit-col-y'] = 'nT'
if len(datacomp) == 4:
bcdata.header['DataComponents'] = 'XYZ'+datacomp[3]
else:
bcdata.header['DataComponents'] = 'XYZ'
else:
#print ("BC: Found a list of functions:", funclist)
bcdata = bcdata.func2stream(funclist,mode='addbaseline',keys=keys)
bcdata.header['col-x'] = 'H'
bcdata.header['unit-col-x'] = 'nT'
bcdata.header['col-y'] = 'D'
bcdata.header['unit-col-y'] = 'deg'
datacomp = bcdata.header.get('DataComponents','')
if len(datacomp) == 4:
bcdata.header['DataComponents'] = 'HDZ'+datacomp[3]
else:
bcdata.header['DataComponents'] = 'HDZ'
# Add BC mark to datatype - data is baseline corrected
bcdata.header['DataType'] = 'BC'
# Update location data from absinfo
if pierdata:
self.header['DataAcquisitionLongitude'] = pierlon
self.header['DataAcquisitionLatitude'] = pierlat
self.header['DataLocationReference'] = pierlocref
self.header['DataElevation'] = pierel
self.header['DataElevationRef'] = pierelref
return bcdata
elif func:
# 1.) move content of basevalue function to columns 'x','y','z'?
try:
func[0]['fx'] = func[0]['fdx']
func[0]['fy'] = func[0]['fdy']
func[0]['fz'] = func[0]['fdz']
func[0].pop('fdx', None)
func[0].pop('fdy', None)
func[0].pop('fdz', None)
                keys = ['x','y','z']
except:
print("BC: could not interpret BaseLineFunctionObject - returning")
return self
# 2.) eventually transform self - check header['DataComponents']
if ctype == 'fff':
pass
elif ctype == 'ddf':
pass
else:
pass
#eventually use other information like absolute path, and function parameter
#for key in self.header:
# if key.startswith('DataAbs'):
# print key, self.header[key]
# drop all lines with nan values in either x or y and if x=0 add some 0.00001 because of arctan(y/x)
#print len(self.ndarray[0])
#for elem in self.ndarray[1]:
# if np.isnan(elem) or elem == 0.0:
# print "Found", elem
#self = self._drop_nans('x')
#self = self._drop_nans('y')
#print len(self.ndarray[0])
bcdata = bcdata.func2stream(func,mode='addbaseline',keys=['x','y','z'])
bcdata.header['col-x'] = 'H'
bcdata.header['unit-col-x'] = 'nT'
bcdata.header['col-y'] = 'D'
bcdata.header['unit-col-y'] = 'deg'
bcdata.header['DataComponents'] = 'HDZ'
return bcdata
else:
print("BC: No data for correction available - header needs to contain DataAbsFunctionObject")
return self
def bindetector(self,key,flagnum=1,keystoflag=['x'],sensorid=None,text=None,**kwargs):
"""
DEFINITION:
Function to detect changes between 0 and 1 and create a flaglist for zero or one states
PARAMETERS:
key: (key) key to investigate
            flagnum:    (int) integer between 0 and 4, default is 1
keystoflag: (list) list of keys to be flagged
sensorid: (string) sensorid for flaglist, default is sensorid of self
text: (string) text to be added to comments/stdout,
will be extended by on/off
Kwargs:
markallon: (BOOL) add comment to all ons
markalloff: (BOOL) add comment to all offs
            onvalue:    (float) critical value to determine the on state (default = 0.99)
RETURNS:
- flaglist
EXAMPLE:
            >>> flaglist = stream.bindetector('z',0,'x',SensorID,'Maintenance switch for rain bucket',markallon=True)
"""
markallon = kwargs.get('markallon')
markalloff = kwargs.get('markalloff')
onvalue = kwargs.get('onvalue')
if not markallon and not markalloff:
markallon = True
if not onvalue:
onvalue = 0.99
if not sensorid:
sensorid = self.header.get('SensorID')
if not len(self.ndarray[0]) > 0:
print ("bindetector: No ndarray data found - aborting")
return self
moddate = datetime.utcnow()
ind = KEYLIST.index(key)
startstate = self.ndarray[ind][0]
flaglist=[]
# Find switching states (Joe Kington: http://stackoverflow.com/questions/4494404/find-large-number-of-consecutive-values-fulfilling-condition-in-a-numpy-array)
d = np.diff(self.ndarray[ind])
idx, = d.nonzero()
idx += 1
if markallon:
if not text:
text = 'on'
if self.ndarray[ind][0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if self.ndarray[ind][-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, self.ndarray[ind].size] # Edit
# Reshape the result into two columns
#print("Bindetector", idx, idx.size)
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
flagline = [num2date(self.ndarray[0][start]).replace(tzinfo=None),num2date(self.ndarray[0][stop]).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.append(flagline)
if markalloff:
if not text:
text = 'off'
if not self.ndarray[ind][0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if not self.ndarray[ind][-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, self.ndarray[ind].size] # Edit
# Reshape the result into two columns
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
                    flagline = [num2date(self.ndarray[0][start]).replace(tzinfo=None),num2date(self.ndarray[0][stop]).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.append(flagline)
return flaglist
def calc_f(self, **kwargs):
"""
DEFINITION:
            Calculates f as sqrt(x^2+y^2+z^2). If delta F is present, then by default
this value is added as well
PARAMETERS:
Kwargs:
- offset: (array) containing three elements [xoffset,yoffset,zoffset],
            - skipdelta   (bool) if selected then an existing delta f is not accounted for
RETURNS:
- DataStream with f and, if given, offset corrected xyz values
EXAMPLES:
>>> fstream = stream.calc_f()
>>> fstream = stream.calc_f(offset=[20000,0,43000])
"""
# Take care: if there is only 0.1 nT accuracy then there will be a similar noise in the deltaF signal
offset = kwargs.get('offset')
skipdelta = kwargs.get('skipdelta')
if not offset:
offset = [0,0,0]
else:
if not len(offset) == 3:
logger.error('calc_f: offset with wrong dimension given - needs to contain a three dim array like [a,b,c] - returning stream without changes')
return self
ndtype = False
try:
if len(self.ndarray[0]) > 0:
ndtype = True
elif len(self) > 1:
ndtype = False
else:
logger.error('calc_f: empty stream - aborting')
return self
except:
            logger.error('calc_f: inappropriate data provided - aborting')
return self
logger.info('calc_f: --- Calculating f started at %s ' % str(datetime.now()))
if ndtype:
inddf = KEYLIST.index('df')
indf = KEYLIST.index('f')
indx = KEYLIST.index('x')
indy = KEYLIST.index('y')
indz = KEYLIST.index('z')
if len(self.ndarray[inddf]) > 0 and not skipdelta:
df = self.ndarray[inddf].astype(float)
else:
df = np.asarray([0.0]*len(self.ndarray[indx]))
x2 = ((self.ndarray[indx]+offset[0])**2).astype(float)
y2 = ((self.ndarray[indy]+offset[1])**2).astype(float)
z2 = ((self.ndarray[indz]+offset[2])**2).astype(float)
self.ndarray[indf] = np.sqrt(x2+y2+z2) + df
else:
for elem in self:
elem.f = np.sqrt((elem.x+offset[0])**2+(elem.y+offset[1])**2+(elem.z+offset[2])**2)
self.header['col-f'] = 'f'
self.header['unit-col-f'] = 'nT'
logger.info('calc_f: --- Calculating f finished at %s ' % str(datetime.now()))
return self
def compensation(self, **kwargs):
"""
DEFINITION:
Method for magnetic variometer data:
            Applies compensation field values, if present in the header,
            to the vector x,y,z.
            Compensation fields are provided in micro Tesla (according to LEMI data).
Please note that any additional provided "DataDeltaValues" are also applied
by default (to avoid use option skipdelta=True).
Calculation:
                This method uses the header entries DataCompensationX/Y/Z and DataDeltaValues.
                After successful application data.header['DataDeltaValuesApplied']
is set to 1.
PARAMETERS:
Kwargs:
- skipdelta (bool) if True then DataDeltaValues are ignored
RETURNS:
            - DataStream with compensation values applied to xyz values
- original dataStream if no compensation values are found
EXAMPLES:
>>> compstream = stream.compensation()
"""
skipdelta = kwargs.get('skipdelta')
if not self.length()[0] > 0:
return self
stream = self.copy()
logger.info("compensation: applying compensation field values to variometer data ...")
deltas = stream.header.get('DataDeltaValues','')
if not skipdelta and not deltas=='':
logger.info("compensation: applying delta values from header['DataDeltaValues'] first")
stream = stream.offset(deltas)
stream.header['DataDeltaValuesApplied'] = 1
offdict = {}
xcomp = stream.header.get('DataCompensationX','0')
ycomp = stream.header.get('DataCompensationY','0')
zcomp = stream.header.get('DataCompensationZ','0')
if not float(xcomp)==0.:
offdict['x'] = -1*float(xcomp)*1000.
if not float(ycomp)==0.:
offdict['y'] = -1*float(ycomp)*1000.
if not float(zcomp)==0.:
offdict['z'] = -1*float(zcomp)*1000.
logger.info(' -- applying compensation fields: x={}, y={}, z={}'.format(xcomp,ycomp,zcomp))
if len(offdict) > 0:
stream = stream.offset(offdict)
stream.header['DataDeltaValuesApplied'] = 1
return stream
def cut(self,length,kind=0,order=0):
"""
DEFINITION:
cut returns the selected amount of lines from datastreams
PARAMETER:
stream : datastream
length : provide the amount of lines to be returned (default: percent of stream length)
kind : define the kind of length parameter
= 0 (default): length is given in percent
= 1: length is given in number of lines
order : define from which side
= 0 (default): the last amount of lines are returned
= 1: lines are counted from the beginning
VERSION:
added in MagPy 0.4.6
APPLICATION:
# length of stream: 86400
cutstream = stream.cut(50)
# length of cutstream: 43200
"""
stream = self.copy()
if length <= 0:
print ("get_last: length needs to be > 0")
return stream
if kind == 0:
if length > 100:
length = 100
amount = int(stream.length()[0]*length/100.)
else:
if length > stream.length()[0]:
return stream
else:
amount = length
for idx,el in enumerate(stream.ndarray):
if len(el) >= amount:
if order == 0:
nel = el[-amount:]
else:
nel = el[:amount]
stream.ndarray[idx] = nel
return stream
    def dailymeans(self, keys=['x','y','z','f'], offset = 0.5, keepposition=False, **kwargs):
"""
DEFINITION:
Calculates daily means of xyz components and their standard deviations. By default
numpy's mean and std methods are applied even if only two data sets are available.
TODO ---
            If less than three data sets are provided, twice the difference between two values
            is used as a conservative proxy of uncertainty. If only one value is available, then
            the maximum uncertainty of the collection is assumed. This behavior can be changed
by keyword arguments.
TODO ---
            An output stream is generated which contains basevalues in columns
            x,y,z and uncertainty values in dx,dy,dz.
            If only a single value is available, dx,dy,dz contain the average uncertainties
of the full data set
time column contains the average time of the measurement
PARAMETERS:
Variables
- keys: (list) provide up to four keys which are used in columns x,y,z
- offset: (float) offset in timeunit days (0 to 0.999) default is 0.5, some test might use 0
Kwargs:
- none
RETURNS:
- stream: (DataStream object) with daily means and standard deviation
EXAMPLE:
>>> means = didata.dailymeans(keys=['dx','dy','dz'])
APPLICATION:
>>> means = didata.dailymeans(keys=['dx','dy','dz'])
>>> mp.plot(means,['x','y','z'],errorbars=True, symbollist=['o','o','o'])
"""
percentage = 90
keys = keys[:4]
poslst,deltaposlst = [],[]
deltakeys = ['dx','dy','dz','df']
for key in keys:
poslst.append(KEYLIST.index(key))
for idx,pos in enumerate(poslst):
deltaposlst.append(KEYLIST.index(deltakeys[idx]))
if not len(self.ndarray[0]) > 0:
return self
array = [[] for el in KEYLIST]
data = self.copy()
data = data.removeduplicates()
timecol = np.floor(data.ndarray[0])
tmpdatelst = np.asarray(list(set(list(timecol))))
for day in tmpdatelst:
sel = data._select_timerange(starttime=day,endtime=day+1)
"""
#for idx,day in enumerate(daylst):
#sel = final._select_timerange(starttime=np.round(day), endtime=np.round(day)+1)
"""
#print (len(sel))
sttmp = DataStream([LineStruct()],{},sel)
array[0].append(day+offset)
for idx, pos in enumerate(poslst):
#if len(sttmp.ndarray[idx+1]) > 0:
if not keepposition:
array[idx+1].append(sttmp.mean(KEYLIST[pos],percentage=percentage))
else:
array[pos].append(sttmp.mean(KEYLIST[pos],percentage=percentage))
#print ("Check", KEYLIST[pos], idx+1, len(sttmp._get_column(KEYLIST[pos])),sttmp._get_column(KEYLIST[pos]),sttmp.mean(KEYLIST[pos],percentage=percentage))
"""
#array[0].append(day+0.5)
#for idx,pos in enumerate(poslst):
array[idx+1].append(np.mean(sel[pos],percentage=percentage))
"""
data.header['col-'+KEYLIST[idx+1]] = '{}'.format(self.header.get('col-'+KEYLIST[pos]))
data.header['unit-col-'+KEYLIST[idx+1]] = '{}'.format(self.header.get('unit-col-'+KEYLIST[pos]))
diff = pos-idx
if not keepposition:
for idx,dpos in enumerate(deltaposlst):
#if len(sttmp.ndarray[idx]) > 0:
me,std = sttmp.mean(KEYLIST[idx+diff],percentage=percentage, std=True)
array[dpos].append(std)
#array[dpos].append(np.std(sel[idx+diff]))
data.header['col-'+KEYLIST[dpos]] = 'sigma {}'.format(self.header.get('col-'+KEYLIST[idx+diff]))
data.header['unit-col-'+KEYLIST[dpos]] = '{}'.format(self.header.get('unit-col-'+KEYLIST[idx+diff]))
data.header['DataFormat'] = 'MagPyDailyMean'
array = [np.asarray(el) for el in array]
retstream = DataStream([LineStruct()],data.header,np.asarray(array))
retstream = retstream.sorting()
return retstream
def date_offset(self, offset):
"""
IMPORTANT:
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
THIS METHOD IS NOT SUPPORTED ANY MORE. PLEASE USE
self.offset({'time':timedelta(seconds=1000)}) INSTEAD
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
DEFINITION:
            Corrects the time column of the selected stream by the offset
offset is a timedelta object (e.g. timedelta(hours=1))
PARAMETERS:
Variables:
- offset: (timedelta object) Offset to apply to stream.
Kwargs:
- None
RETURNS:
- stream: (DataStream object) Stream with offset applied.
EXAMPLE:
>>> data = data.offset(timedelta(minutes=3))
APPLICATION:
"""
header = self.header
newstream = DataStream()
array = self.ndarray
        if len(array[0]) > 0:
            ndtype = True
            secsperday = 3600*24
            array[0] = array[0] + offset.total_seconds()/secsperday
for elem in self:
newtime = num2date(elem.time).replace(tzinfo=None) + offset
elem.sectime = elem.time
elem.time = date2num(newtime)
newstream.add(elem)
        logger.info('date_offset: Corrected time column by %s sec' % str(offset.total_seconds()))
return DataStream(newstream,header,array)
def delta_f(self, **kwargs):
"""
DESCRIPTION:
Calculates the difference of x+y+z to f and puts the result to the df column
PARAMETER:
keywords:
:type offset: float
:param offset: constant offset to f values
:type digits: int
:param digits: number of digits to be rounded (should equal the input precision)
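        EXAMPLE (illustrative, assuming filled x,y,z and f columns):
            >>> stream = stream.delta_f()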
"""
        # Take care: if there is only 0.1 nT accuracy then there will be a similar noise in the deltaF signal
offset = kwargs.get('offset')
digits = kwargs.get('digits')
if not offset:
offset = 0
if not digits:
digits = 8
logger.info('--- Calculating delta f started at %s ' % str(datetime.now()))
try:
syst = self.header['DataComponents']
except:
syst = None
ind = KEYLIST.index("df")
indx = KEYLIST.index("x")
indy = KEYLIST.index("y")
indz = KEYLIST.index("z")
indf = KEYLIST.index("f")
if len(self.ndarray[0])>0 and len(self.ndarray[indx])>0 and len(self.ndarray[indy])>0 and len(self.ndarray[indz])>0 and len(self.ndarray[indf])>0:
# requires x,y,z and f
arx = self.ndarray[indx]**2
ary = self.ndarray[indy]**2
arz = self.ndarray[indz]**2
if syst in ['HDZ','hdz','HDZF','hdzf','HDZS','hdzs','HDZG','hdzg']:
print("deltaF: found HDZ orientation")
ary = np.asarray([0]*len(self.ndarray[indy]))
sumar = list(arx+ary+arz)
sqr = np.sqrt(np.asarray(sumar))
self.ndarray[ind] = sqr - (self.ndarray[indf] + offset)
else:
for elem in self:
elem.df = round(np.sqrt(elem.x**2+elem.y**2+elem.z**2),digits) - (elem.f + offset)
self.header['col-df'] = 'delta f'
self.header['unit-col-df'] = 'nT'
logger.info('--- Calculating delta f finished at %s ' % str(datetime.now()))
return self
def f_from_df(self, **kwargs):
"""
DESCRIPTION:
Calculates the f from the difference of x+y+z and df
PARAMETER:
keywords:
:type offset: float
:param offset: constant offset to f values
:type digits: int
:param digits: number of digits to be rounded (should equal the input precision)
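        EXAMPLE (illustrative, assuming filled x,y,z and df columns):
            >>> stream = stream.f_from_df()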
"""
        # Take care: if there is only 0.1 nT accuracy then there will be a similar noise in the deltaF signal
offset = kwargs.get('offset')
digits = kwargs.get('digits')
if not offset:
offset = 0.
if not digits:
digits = 8
logger.info('--- Calculating f started at %s ' % str(datetime.now()))
try:
syst = self.header['DataComponents']
except:
syst = None
ind = KEYLIST.index("df")
indx = KEYLIST.index("x")
indy = KEYLIST.index("y")
indz = KEYLIST.index("z")
indf = KEYLIST.index("f")
if len(self.ndarray[0])>0 and len(self.ndarray[indx])>0 and len(self.ndarray[indy])>0 and len(self.ndarray[indz])>0 and len(self.ndarray[ind])>0:
# requires x,y,z and f
arx = self.ndarray[indx]**2
ary = self.ndarray[indy]**2
arz = self.ndarray[indz]**2
if syst in ['HDZ','hdz','HDZF','hdzf','HDZS','hdzs','HDZG','hdzg']:
print("deltaF: found HDZ orientation")
ary = np.asarray([0]*len(self.ndarray[indy]))
sumar = list(arx+ary+arz)
sqr = np.sqrt(np.asarray(sumar))
self.ndarray[indf] = sqr - (self.ndarray[ind] + offset)
else:
for elem in self:
elem.f = round(np.sqrt(elem.x**2+elem.y**2+elem.z**2),digits) - (elem.df + offset)
self.header['col-f'] = 'f'
self.header['unit-col-f'] = 'nT'
logger.info('--- Calculating f finished at %s ' % str(datetime.now()))
return self
def differentiate(self, **kwargs):
"""
DEFINITION:
Method to differentiate all columns with respect to time.
-- Using successive gradients
PARAMETERS:
Variables:
keys: (list - default ['x','y','z','f'] provide limited key-list
put2key
- keys: (list) Provide limited key-list. default = ['x','y','z','f']
- put2keys: (type) Provide keys to put differentiated keys to.
Default = ['dx','dy','dz','df']
Kwargs:
RETURNS:
- stream: (DataStream) Differentiated data stream, x values in dx, etc..
EXAMPLE:
>>> stream = stream.differentiate(keys=['f'],put2keys=['df'])
APPLICATION:
"""
logger.info('differentiate: Calculating derivative started.')
keys = kwargs.get('keys')
put2keys = kwargs.get('put2keys')
if not keys:
            keys = ['x','y','z','f']
if not put2keys:
            put2keys = ['dx','dy','dz','df']
if len(keys) != len(put2keys):
logger.error('Amount of columns read must be equal to outputcolumns')
return self
stream = self.copy()
ndtype = False
if len(stream.ndarray[0]) > 0:
t = stream.ndarray[0].astype(float)
ndtype = True
else:
t = stream._get_column('time')
for i, key in enumerate(keys):
if ndtype:
ind = KEYLIST.index(key)
val = stream.ndarray[ind].astype(float)
else:
val = stream._get_column(key)
dval = np.gradient(np.asarray(val))
stream._put_column(dval, put2keys[i])
stream.header['col-'+put2keys[i]] = r"d%s vs dt" % (key)
logger.info('--- derivative obtained at %s ' % str(datetime.now()))
return stream
def DWT_calc(self,key='x',wavelet='db4',level=3,plot=False,outfile=None,
window=5):
"""
DEFINITION:
Discrete wavelet transform (DWT) method of analysing a magnetic signal
to pick out SSCs. This method was taken from Hafez (2013): "Systematic examination
of the geomagnetic storm sudden commencement using multi resolution analysis."
(NOTE: PyWavelets package must be installed for this method. It should be applied
to 1s data - otherwise the sample window should be changed.)
METHOD:
1. Use the 4th-order Daubechies wavelet filter to calculate the 1st to 3rd details
(D1, D2, D3) of the geomagnetic signal. This is applied to a sliding window of
five samples.
2. The 3rd detail (D3) samples are squared to evaluate the magnitude.
3. The sample window (5) is averaged to avoid ripple effects. (This means the
returned stream will have ~1/5 the size of the original.)
PARAMETERS:
Variables:
- key: (str) Apply DWT to this key. Default 'x' due to SSCs dominating
the horizontal component.
- wavelet: (str) Type of filter to use. Default 'db4' (4th-order Daubechies
wavelet filter) according to Hafez (2013).
- level: (int) Decomposition level. Will calculate details down to this level.
Default 3, also Hafez (2013).
- plot: (bool) If True, will display a plot of A3, D1, D2 and D3.
            - outfile:      (str) If given, the plot will be saved to 'outfile' path.
- window: (int) Length of sample window. Default 5, i.e. 5s with second data.
RETURNS:
- DWT_stream: (DataStream object) A stream containing the following:
'x': A_n (approximation function)
'var1': D1 (first detail)
'var2': D2 (second detail)
'var3': D3 (third detail)
... will have to be changed if higher details are required.
EXAMPLE:
>>> DWT_stream = stream.DWT_calc(plot=True)
APPLICATION:
# Storm detection using detail 3 (D3 = var3):
from magpy.stream import *
stream = read('LEMI_1s_Data_2014-02-15.cdf') # 2014-02-15 is a good storm example
DWT_stream = stream.DWT_calc(plot=True)
Da_min = 0.0005 # nT^2 (minimum amplitude of D3 for storm detection)
Dp_min = 40 # seconds (minimum period of Da > Da_min for storm detection)
detection = False
for row in DWT_stream:
if row.var3 >= Da_min and detection == False:
timepin = row.time
detection = True
elif row.var3 < Da_min and detection == True:
duration = (num2date(row.time) - num2date(timepin)).seconds
if duration >= Dp_min:
print "Storm detected!"
print duration, num2date(timepin)
detection = False
"""
# Import required package PyWavelets:
# http://www.pybytes.com/pywavelets/index.html
import pywt
# 1a. Grab array from stream
data = self._get_column(key)
t_ind = KEYLIST.index('time')
#DWT_stream = DataStream([],{})
DWT_stream = DataStream()
headers = DWT_stream.header
array = [[] for key in KEYLIST]
x_ind = KEYLIST.index('x')
dx_ind = KEYLIST.index('dx')
var1_ind = KEYLIST.index('var1')
var2_ind = KEYLIST.index('var2')
var3_ind = KEYLIST.index('var3')
i = 0
logger.info("DWT_calc: Starting Discrete Wavelet Transform of key %s." % key)
# 1b. Loop for sliding window
while True:
if i >= (len(data)-window):
break
#row = LineStruct()
# Take the values in the middle of the window (not exact but changes are
# not extreme over standard 5s window)
#row.time = self[i+window/2].time
array[t_ind].append(self.ndarray[t_ind][i+int(window/2)])
data_cut = data[i:i+window]
#row.x = sum(data_cut)/float(window)
array[x_ind].append(sum(data_cut)/float(window))
# 1c. Calculate wavelet transform coefficients
# Wavedec produces results in form: [cA_n, cD_n, cD_n-1, ..., cD2, cD1]
# (cA_n is a list of coefficients for an approximation for the nth order.
# All cD_n are coefficients for details n --> 1.)
coeffs = pywt.wavedec(data_cut, wavelet, level=level)
# 1d. Calculate approximation and detail functions from coefficients
take = len(data_cut) # (Length of fn from coeffs = length of original data)
functions = []
approx = True
for item in coeffs:
if approx:
part = 'a' # Calculate approximation function
else:
part = 'd' # Calculate detail function
function = pywt.upcoef(part, item, wavelet, level=level, take=take)
functions.append(function)
approx = False
# 2. Square the results
fin_fns = []
for item in functions:
item_sq = [j**2 for j in item]
# 3. Average over the window
val = sum(item_sq)/window
fin_fns.append(val)
# TODO: This is hard-wired for level=3.
#row.dx, row.var1, row.var2, row.var3 = fin_fns
array[dx_ind].append(fin_fns[0])
array[var1_ind].append(fin_fns[3])
array[var2_ind].append(fin_fns[2])
array[var3_ind].append(fin_fns[1])
#DWT_stream.add(row)
i += window
logger.info("DWT_calc: Finished DWT.")
DWT_stream.header['col-x'] = 'A3'
DWT_stream.header['unit-col-x'] = 'nT^2'
DWT_stream.header['col-var1'] = 'D1'
DWT_stream.header['unit-col-var1'] = 'nT^2'
DWT_stream.header['col-var2'] = 'D2'
DWT_stream.header['unit-col-var2'] = 'nT^2'
DWT_stream.header['col-var3'] = 'D3'
DWT_stream.header['unit-col-var3'] = 'nT^2'
# Plot stream:
if plot == True:
date = datetime.strftime(num2date(self.ndarray[0][0]),'%Y-%m-%d')
logger.info('DWT_calc: Plotting data...')
if outfile:
DWT_stream.plot(['x','var1','var2','var3'],
plottitle="DWT Decomposition of %s (%s)" % (key,date),
outfile=outfile)
else:
DWT_stream.plot(['x','var1','var2','var3'],
plottitle="DWT Decomposition of %s (%s)" % (key,date))
#return DWT_stream
return DataStream([LineStruct()], headers, np.asarray([np.asarray(a) for a in array]))
def eventlogger(self, key, values, compare=None, stringvalues=None, addcomment=None, debugmode=None):
"""
read stream and log data of which key meets the criteria
maybe combine with extract
Required:
:type key: string
:param key: provide the key to be examined
:type values: list
:param values: provide a list of three values
Optional:
:type compare: string
:param compare: ">, <, ==, !="
:type stringvalues: list
:param stringvalues: provide a list of exactly the same length as values with the respective comments
:type addcomment: bool
:param addcomment: if true add the stringvalues to the comment line of the datastream
:type debugmode: bool
:param debugmode: provide more information
example:
compare is string like ">, <, ==, !="
st.eventlogger('var3',[15,20,30],'>')
"""
assert type(values) == list
if not compare:
compare = '=='
if not compare in ['<','>','<=','>=','==','!=']:
logger.warning('Eventlogger: wrong value for compare: needs to be among <,>,<=,>=,==,!=')
return self
if not stringvalues:
stringvalues = ['Minor storm onset','Moderate storm onset','Major storm onset']
else:
assert type(stringvalues) == list
if not len(stringvalues) == len(values):
logger.warning('Eventlogger: Provided comments do not match amount of values')
return self
for elem in self:
#evaluationstring = 'elem.' + key + ' ' + compare + ' ' + str(values[0])
if eval('elem.'+key+' '+compare+' '+str(values[2])):
stormlogger.warning('%s at %s' % (stringvalues[2],num2date(elem.time).replace(tzinfo=None)))
if addcomment:
if elem.comment == '-':
elem.comment = stringvalues[2]
else:
elem.comment += ', ' + stringvalues[2]
elif eval('elem.'+key+' '+compare+' '+str(values[1])):
stormlogger.warning('%s at %s' % (stringvalues[1],num2date(elem.time).replace(tzinfo=None)))
if addcomment:
if elem.comment == '-':
elem.comment = stringvalues[1]
else:
elem.comment += ', ' + stringvalues[1]
elif eval('elem.'+key+' '+compare+' '+str(values[0])):
stormlogger.warning('%s at %s' % (stringvalues[0],num2date(elem.time).replace(tzinfo=None)))
if addcomment:
if elem.comment == '-':
elem.comment = stringvalues[0]
else:
elem.comment += ', ' + stringvalues[0]
return self
def extract(self, key, value, compare=None, debugmode=None):
"""
DEFINITION:
Read stream and extract data of the selected key which meets the chosen criteria
PARAMETERS:
Variables:
- key: (str) streams key e.g. 'x'.
- value: (str/float/int) any selected input which should be tested for
special note: if value is in brackets, then the term is evaluated
e.g. value="('int(elem.time)')" selects all points at 0:00
Important: this only works for compare = '=='
Kwargs:
- compare: (str) criteria, one out of ">=", "<=",">", "<", "==", "!=", default is '=='
- debugmode:(bool) if true several additional outputs will be created
RETURNS:
- DataStream with selected values only
EXAMPLES:
>>> extractedstream = stream.extract('x',20000,'>')
>>> extractedstream = stream.extract('str1','Berger')
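A hedged sketch of the 'like' option (substring match on a string column; the
'comment' column and search text are illustrative only):
>>> commentdata = stream.extract('comment','lightning','like')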
"""
if not compare:
compare = '=='
if not compare in [">=", "<=",">", "<", "==", "!=", 'like']:
logger.info('--- Extract: Please provide proper compare parameter ">=", "<=",">", "<", "==", "like" or "!=" ')
return self
if value in ['',None]:
return self
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype = True
ind = KEYLIST.index(key)
stream = self.copy()
if not self._is_number(value):
if value.startswith('(') and value.endswith(')') and compare == '==':
logger.info("extract: Selected special functional type -equality defined by difference less then 10 exp-6")
if ndtype:
val = eval(value[1:-1])
indexar = np.where((np.abs(stream.ndarray[ind]-val)) < 0.000001)[0]
else:
val = value[1:-1]
liste = []
for elem in self:
if abs(eval('elem.'+key) - eval(val)) < 0.000001:
liste.append(elem)
return DataStream(liste,self.header)
else:
#print "Found String", ndtype
too = '"' + str(value) + '"'
if ndtype:
if compare == 'like':
indexar = np.asarray([i for i, s in enumerate(stream.ndarray[ind]) if str(value) in s])
else:
#print stream.ndarray[ind]
searchclause = 'stream.ndarray[ind] '+ compare + ' ' + too
#print searchclause, ind, key
indexar = eval('np.where('+searchclause+')[0]')
#print indexar, len(indexar)
else:
too = str(value)
if ndtype:
searchclause = 'stream.ndarray[ind].astype(float) '+ compare + ' ' + too
with np.errstate(invalid='ignore'):
indexar = eval('np.where('+searchclause+')[0]')
if ndtype:
for ind,el in enumerate(stream.ndarray):
if len(stream.ndarray[ind]) > 0:
ar = [stream.ndarray[ind][i] for i in indexar]
stream.ndarray[ind] = np.asarray(ar).astype(object)
return stream
else:
liste = [elem for elem in self if eval('elem.'+key+' '+ compare + ' ' + too)]
return DataStream(liste,self.header,self.ndarray)
def extract2(self, keys, get='>', func=None, debugmode=None):
"""
DEFINITION:
Read stream and extract data of the selected keys which meet the chosen criteria
PARAMETERS:
Variables:
- keys: (list) keylist like ['x','f'].
- func: a function object
Kwargs:
- get: (str) criteria, one out of ">=", "<=",">", "<", "==", "!=", default is '>'
- debugmode:(bool) if true several additional outputs will be created
RETURNS:
- DataStream with selected values only
EXAMPLES:
>>> extractedstream = stream.extract2(['x','y'], get='>')
"""
if not get:
get = '=='
if not get in [">=", "<=",">", "<", "==", "!=", 'like']:
print ('--- Extract: Please provide proper compare parameter ">=", "<=",">", "<", "==", "like" or "!=" ')
return self
stream = self.copy()
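# NOTE: the locally defined func below overrides any function object passed via
# the 'func' keyword argument - the threshold curve is currently hard-wired here.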
def func(x):
y = 1/(0.2*exp(0.06/(x/10000.))) + 2.5
return y
xpos = KEYLIST.index(keys[0])
ypos = KEYLIST.index(keys[1])
x = stream.ndarray[xpos].astype(float)
y = stream.ndarray[ypos].astype(float)
idxlist = []
for idx,val in enumerate(x):
ythreshold = func(val)
test = eval('y[idx] '+ get + ' ' + str(ythreshold))
#print (val, 'y[idx] '+ get + ' ' + str(ythreshold))
if test:
idxlist.append(idx)
array = [[] for key in KEYLIST]
for i,key in enumerate(KEYLIST):
for idx in idxlist:
if len(stream.ndarray[i]) > 0:
array[i].append(stream.ndarray[i][idx])
array[i] = np.asarray(array[i])
print ("Length of list", len(idxlist))
return DataStream([LineStruct()], stream.header,np.asarray(array))
def extrapolate(self, start, end):
"""
DESCRIPTION:
Reads the stream output of an absolute analysis and extrapolates the data.
Current method (to be improved if necessary):
- repeat the first and last inputs with baseline values at the desired start and end time
Hereby a functional fit (e.g. spline or polynomial) is forced towards a quasi-stable baseline evolution.
The principal assumption of this technique is that the base values are constant on average.
APPLICATION:
is used by stream.baseline
"""
ltime = date2num(end) # + timedelta(days=1))
ftime = date2num(start) # - timedelta(days=1))
array = [[] for key in KEYLIST]
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype = True
firsttime = np.min(self.ndarray[0])
lasttime = np.max(self.ndarray[0])
# Find the last element with baseline values - assuming a sorted array
inddx = KEYLIST.index('dx')
lastind=len(self.ndarray[0])-1
#print("Extrapolate", self.ndarray,len(self.ndarray[inddx]), self.ndarray[inddx], self.ndarray[inddx][lastind])
while np.isnan(float(self.ndarray[inddx][lastind])):
lastind = lastind-1
firstind=0
while np.isnan(float(self.ndarray[inddx][firstind])):
firstind = firstind+1
#print "extrapolate", num2date(ftime), num2date(ltime), ftime, ltime
for idx,elem in enumerate(self.ndarray):
if len(elem) > 0:
array[idx] = self.ndarray[idx]
if idx == 0:
array[idx] = np.append(array[idx],ftime)
array[idx] = np.append(array[idx],ltime)
#array[idx] = np.append(self.ndarray[idx],ftime)
#array[idx] = np.append(self.ndarray[idx],ltime)
else:
array[idx] = np.append(array[idx],array[idx][firstind])
array[idx] = np.append(array[idx],array[idx][lastind])
#array[idx] = np.append(self.ndarray[idx],self.ndarray[idx][firstind])
#array[idx] = np.append(self.ndarray[idx],self.ndarray[idx][lastind])
indar = np.argsort(array[0])
array = [el[indar].astype(object) if len(el)>0 else np.asarray([]) for el in array]
else:
if self.length()[0] < 2:
return self
firstelem = self[0]
lastelem = self[-1]
# Find the last element with baseline values
i = 1
while isnan(lastelem.dx):
lastelem = self[-i]
i = i +1
line = LineStruct()
for key in KEYLIST:
if key == 'time':
line.time = ftime
else:
exec('line.'+key+' = firstelem.'+key)
self.add(line)
line = LineStruct()
for key in KEYLIST:
if key == 'time':
line.time = ltime
else:
exec('line.'+key+' = lastelem.'+key)
self.add(line)
stream = DataStream(self,self.header,np.asarray(array,dtype=object))
#print "extra", stream.ndarray
#print "extra", stream.length()
#stream = stream.sorting()
return stream
#return DataStream(self,self.header,self.ndarray)
def filter(self,**kwargs):
"""
DEFINITION:
Uses a selected window to filter the datastream - similar to the smooth function.
(take a look at the Scipy Cookbook/Signal Smooth)
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
This function is approximately twice as fast as the previous version.
Difference: Gaps of the stream are filled by time steps with NaNs in the data columns
By default missing values are interpolated if more than 90 percent of data is present
within the window range. This is used to comply with INTERMAGNET rules. Set option
conservative to False to avoid this.
PARAMETERS:
Kwargs:
- keys: (list) List of keys to smooth
- filter_type: (string) name of the window. One of
'flat','barthann','bartlett','blackman','blackmanharris','bohman',
'boxcar','cosine','flattop','hamming','hann','nuttall',
'parzen','triang','gaussian','wiener','spline','butterworth'
See http://docs.scipy.org/doc/scipy/reference/signal.html
- filter_width: (timedelta) window width of the filter
- resample_period: (int) resampling interval in seconds (e.g. 1 for one second data)
leave blank for standard filters as it will be automatically selected
- noresample: (bool) if True the data set is resampled at filter_width positions
- missingdata: (string) define how to deal with missing data
'conservative' (default): no filtering
'interpolate': interpolate if less than 10% are missing
'mean': use mean if less than 10% are missing
- conservative: (bool) if True then no interpolation is performed
- autofill: (list) of keys: provide a keylist for which nan values are linearly interpolated before filtering - use with care, might be useful if you have low resolution parameters associated with main values (like humidity etc.)
- resampleoffset: (timedelta) if provided the offset will be added to resamples starttime
- resamplemode: (string) if 'fast' then fast resampling is used
- testplot: (bool) provides a plot of unfiltered and filtered data for each key if true
- dontfillgaps: (bool) if true, get_gaps will not be conducted - much faster but requires the absence of data gaps (including time step)
RETURNS:
- self: (DataStream) containing the filtered signal within the selected columns
EXAMPLE:
>>> nice_data = bad_data.filter(keys=['x','y','z'])
or
>>> nice_data = bad_data.filter(filter_type='gaussian',filter_width=timedelta(hours=1))
APPLICATION:
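A hedged sketch producing one-minute data from second data (file name is
illustrative; kwargs as described above):
>>> data = read('example_second_data.sec')
>>> filtered = data.filter(keys=['x','y','z'],filter_type='gaussian',filter_width=timedelta(minutes=2),resample_period=60)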
TODO:
!!A proper and correct treatment of gaps within the dataset to be filtered is missing!!
"""
# ########################
# Kwargs and definitions
# ########################
filterlist = ['flat','barthann','bartlett','blackman','blackmanharris','bohman',
'boxcar','cosine','flattop','hamming','hann','nuttall','parzen','triang',
'gaussian','wiener','spline','butterworth']
# To be added
#kaiser(M, beta[, sym]) Return a Kaiser window.
#slepian(M, width[, sym]) Return a digital Slepian (DPSS) window.
#chebwin(M, at[, sym]) Return a Dolph-Chebyshev window.
# see http://docs.scipy.org/doc/scipy/reference/signal.html
keys = kwargs.get('keys')
filter_type = kwargs.get('filter_type')
filter_width = kwargs.get('filter_width')
resample_period = kwargs.get('resample_period')
filter_offset = kwargs.get('filter_offset')
noresample = kwargs.get('noresample')
resamplemode = kwargs.get('resamplemode')
resamplestart = kwargs.get('resamplestart')
resampleoffset = kwargs.get('resampleoffset')
testplot = kwargs.get('testplot')
autofill = kwargs.get('autofill')
dontfillgaps = kwargs.get('dontfillgaps')
fillgaps = kwargs.get('fillgaps')
debugmode = kwargs.get('debugmode')
conservative = kwargs.get('conservative')
missingdata = kwargs.get('missingdata')
sr = self.samplingrate()
if not keys:
keys = self._get_key_headers(numerical=True)
if not filter_width and not resample_period:
if sr < 0.5: # use 1 second filter with 0.3 Hz cut off as default
filter_width = timedelta(seconds=3.33333333)
resample_period = 1.0
else: # use 1 minute filter with 0.008 Hz cut off as default
filter_width = timedelta(minutes=2)
resample_period = 60.0
if not filter_width: # resample_period obviously provided - use nyquist
filter_width = timedelta(seconds=2*resample_period)
if not resample_period: # filter_width obviously provided... use filter_width as period
resample_period = filter_width.total_seconds()
# Fall back for old data
if filter_width == timedelta(seconds=1):
filter_width = timedelta(seconds=3.3)
resample_period = 1.0
if not noresample:
resample = True
else:
resample = False
if not autofill:
autofill = []
else:
if not isinstance(autofill, (list, tuple)):
print("Autofill need to be a keylist")
return
if not resamplemode:
resamplefast = False
else:
if resamplemode == 'fast':
resamplefast = True
else:
resamplefast = False
if not debugmode:
debugmode = None
if not filter_type:
filter_type = 'gaussian'
if resamplestart:
print("############## Warning ##############")
print("option RESAMPLESTART is not used any more. Switch to resampleoffset for modifying time steps")
if not missingdata:
missingdata = 'conservative'
ndtype = False
# ########################
# Basic validity checks and window size definitions
# ########################
if not filter_type in filterlist:
logger.error("smooth: Window is none of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman', etc")
logger.debug("smooth: You entered non-existing filter type - %s - " % filter_type)
return self
logger.info("filter: Filtering with {} window".format(filter_type))
#print self.length()[0]
if not self.length()[0] > 1:
logger.error("Filter: stream needs to contain data - returning.")
return self
if debugmode:
print("Starting length:", self.length())
#if not dontfillgaps: ### changed--- now using dont fill gaps as default
if fillgaps:
self = self.get_gaps()
if debugmode:
print("length after getting gaps:", len(self))
window_period = filter_width.total_seconds()
si = timedelta(seconds=self.get_sampling_period()*24*3600)
sampling_period = si.days*24*3600 + si.seconds + np.round(si.microseconds/1000000.0,2)
if debugmode:
print("Timedelta and sampling period:", si, sampling_period)
# window_len defines the window size in data points assuming the major sampling period to be valid for the dataset
if filter_type == 'gaussian':
# For a gaussian fit
window_len = np.round((window_period/sampling_period))
#print (window_period,sampling_period,window_len)
# Window length needs to be odd number:
if window_len % 2 == 0:
window_len = window_len +1
std = 0.83255461*window_len/(2*np.pi)
trangetmp = self._det_trange(window_period)*24*3600
if trangetmp < 1:
trange = np.round(trangetmp,3)
else:
trange = timedelta(seconds=(self._det_trange(window_period)*24*3600)).seconds
if debugmode:
print("Window character: ", window_len, std, trange)
else:
window_len = np.round(window_period/sampling_period)
if window_len % 2:
window_len = window_len+1
trange = window_period/2
if sampling_period >= window_period:
logger.warning("Filter: Sampling period is equal or larger then projected filter window - returning.")
return self
# ########################
# Reading data of each selected column in stream
# ########################
if len(self.ndarray[0])>0:
t = self.ndarray[0]
ndtype = True
else:
t = self._get_column('time')
if debugmode:
print("Length time column:", len(t))
window_len = int(window_len)
for key in keys:
if debugmode:
print ("Start filtering for", key)
if not key in KEYLIST:
logger.error("Column key %s not valid." % key)
keyindex = KEYLIST.index(key)
if len(self.ndarray[keyindex])>0:
v = self.ndarray[keyindex]
else:
v = self._get_column(key)
# INTERMAGNET 90 percent rule: interpolate missing values if less than 10 percent are missing
#if not conservative or missingdata in ['interpolate','mean']:
if missingdata in ['interpolate','mean']:
fill = 'mean'
try:
if missingdata == 'interpolate':
fill = missingdata  # fixed NameError: was 'missingdate', which silently fell back to 'mean'
else:
fill = 'mean'
except:
fill = 'mean'
v = self.missingvalue(v,np.round(window_period/sampling_period),fill=fill) # using ratio here and not _len
if key in autofill:
logger.warning("Filter: key %s has been selected for linear interpolation before filtering." % key)
logger.warning("Filter: I guess you know what you are doing...")
nans, x= nan_helper(v)
v[nans]= interp(x(nans), x(~nans), v[~nans])
# Make sure that we are dealing with numbers
v = np.array(list(map(float, v)))
if v.ndim != 1:
logger.error("Filter: Only accepts 1 dimensional arrays.")
if window_len<3:
logger.error("Filter: Window lenght defined by filter_width needs to cover at least three data points")
if debugmode:
print("Treating k:", key, v.size)
if v.size >= window_len:
#print ("Check:", v, len(v), window_len)
s=np.r_[v[int(window_len)-1:0:-1],v,v[-1:-int(window_len):-1]]
if filter_type == 'gaussian':
w = signal.gaussian(window_len, std=std)
y=np.convolve(w/w.sum(),s,mode='valid')
res = y[(int(window_len/2)):(len(v)+int(window_len/2))]
elif filter_type == 'wiener':
res = signal.wiener(v, int(window_len), noise=0.5)
elif filter_type == 'butterworth':
dt = 800./float(len(v))
nyf = 0.5/dt
b, a = signal.butter(4, 1.5/nyf)
res = signal.filtfilt(b, a, v)
elif filter_type == 'spline':
res = UnivariateSpline(t, v, s=240)(t)  # evaluate the spline at the original time steps so res is an array like in the other branches
elif filter_type == 'flat':
w=np.ones(int(window_len),'d')
s = np.ma.masked_invalid(s)
y=np.convolve(w/w.sum(),s,mode='valid') #'valid')
res = y[(int(window_len/2)-1):(len(v)+int(window_len/2)-1)]
else:
w = eval('signal.'+filter_type+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
res = y[(int(window_len/2)):(len(v)+int(window_len/2))]
if testplot == True:
fig, ax1 = plt.subplots(1,1, figsize=(10,4))
ax1.plot(t, v, 'b.-', linewidth=2, label = 'raw data')
ax1.plot(t, res, 'r.-', linewidth=2, label = filter_type)
plt.show()
if ndtype:
self.ndarray[keyindex] = res
else:
self._put_column(res,key)
if resample:
if debugmode:
print("Resampling: ", keys)
self = self.resample(keys,period=resample_period,fast=resamplefast,offset=resampleoffset)
self.header['DataSamplingRate'] = str(resample_period) + ' sec'
# ########################
# Update header information
# ########################
passband = filter_width.total_seconds()
#print ("passband", 1/passband)
#self.header['DataSamplingFilter'] = filter_type + ' - ' + str(trange) + ' sec'
self.header['DataSamplingFilter'] = filter_type + ' - ' + str(1.0/float(passband)) + ' Hz'
return self
def nfilter(self, **kwargs):
"""
DEFINITION:
Convenience wrapper around the filter method - all kwargs are passed on to filter().
Returns a stream with filtered data, resampled at the sampling period given by
filter_width.
PARAMETERS:
Variables:
- variable: (type) Description.
Kwargs:
- filter_type: (str) Options: gaussian, linear or special. Default = gaussian.
- filter_width: (timedelta object) Default = timedelta(minutes=1)
- filter_offset: (timedelta object) Default=0
- gauss_win: (int) Default = 1.86506 (corresponds to +/-45 sec in case of min or 45 min in case of hour).
- fmi_initial_data: (DataStream containing dH values (dx)) Default=[].
RETURNS:
- stream: (DataStream object) Stream containing filtered data.
EXAMPLE:
>>> stream_filtered = stream.nfilter(filter_width=timedelta(minutes=3))
APPLICATION:
"""
return self.filter(**kwargs)
def fit(self, keys, **kwargs):
"""
DEFINITION:
Code for fitting data. Please note: if nans are present in any of the selected keys
the whole line is dropped before fitting.
PARAMETERS:
Variables:
- keys: (list) Provide a list of keys to be fitted (e.g. ['x','y','z']).
Kwargs:
- fitfunc: (str) Options: 'poly', 'harmonic', 'least-squares', 'spline', 'none', default='spline'
- timerange: (timedelta object) Default = timedelta(hours=1)
- fitdegree: (float) Default=5
- knotstep: (float < 0.5) determines the amount of knots: amount = 1/knotstep ---> VERY smooth 0.1 | NOT VERY SMOOTH 0.001
- flag: (bool).
RETURNS:
- function object: (list) func = [functionkeylist, sv, ev]
EXAMPLE:
>>> func = stream.fit(['x'])
APPLICATION:
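A minimal sketch of using the returned function object (func[0] maps 'f'+key to an
interpolation function of normalized time, func[1] and func[2] are the
normalization start and end values):
>>> func = stream.fit(['x'],fitfunc='poly',fitdegree=2)
>>> fitted_x_mid = func[0]['fx'](0.5)   # fitted x value in the middle of the time range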
"""
# Defaults:
fitfunc = kwargs.get('fitfunc')
fitdegree = kwargs.get('fitdegree')
knotstep = kwargs.get('knotstep')
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
if not fitfunc:
fitfunc = 'spline'
if not fitdegree:
fitdegree = 5
if not knotstep:
knotstep = 0.01
defaulttime = 0
if not starttime:
starttime = self._find_t_limits()[0]
if not endtime:
endtime = self._find_t_limits()[1]
if starttime == self._find_t_limits()[0]:
defaulttime += 1
if endtime == self._find_t_limits()[1]:
defaulttime += 1
if knotstep >= 0.5:
raise ValueError("Knotstep needs to be smaller than 0.5")
functionkeylist = {}
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype=True
#tok = True
fitstream = self.copy()
if not defaulttime == 2: # TODO if applied to full stream, one point at the end is missing
fitstream = fitstream.trim(starttime=starttime, endtime=endtime)
sv = 0
ev = 0
for key in keys:
tmpst = fitstream._drop_nans(key)
#print ("Length", tmpst.length())
if ndtype:
t = tmpst.ndarray[0]
else:
t = tmpst._get_column('time')
if len(t) < 1:
#tok = False
print ("Column {} does not contain valid values".format(key))
continue
nt,sv,ev = fitstream._normalize(t)
sp = fitstream.get_sampling_period()
if sp == 0: ## if no dominant sampling period can be identified then use minutes
sp = 0.0177083333256
if not key in KEYLIST[1:16]:
raise ValueError("Column key not valid")
if ndtype:
ind = KEYLIST.index(key)
val = tmpst.ndarray[ind]
else:
val = tmpst._get_column(key)
# interpolate NaN values
# normalized sampling rate
sp = sp/(ev-sv) # should be the best?
#sp = (ev-sv)/len(val) # does not work
x = arange(np.min(nt),np.max(nt),sp)
#print len(x)
if len(val)<=1:
logger.warning('Fit: No valid data for key {}'.format(key))
break
elif fitfunc == 'spline':
try:
#logger.error('Interpolation: Testing knots (knotsteps = {}), (len(val) = {}'.format(knotstep, len(val)))
knots = np.array(arange(np.min(nt)+knotstep,np.max(nt)-knotstep,knotstep))
if len(knots) > len(val):
knotstep = knotstep*4
knots = np.array(arange(np.min(nt)+knotstep,np.max(nt)-knotstep,knotstep))
logger.warning('Too many knots in spline for available data. Please check amount of fitted data in time range. Trying to reduce resolution ...')
ti = interpolate.splrep(nt, val, k=3, s=0, t=knots)
except:
logger.error('Value error in fit function - likely reason: no valid numbers or too few numbers for fit: len(knots)={} > len(val)={}? '.format(len(knots),len(val)))
print ("Checking", key, len(val), val, sp, knotstep, len(knots))
raise ValueError("Value error in fit function - not enough data or invalid numbers")
return
#print nt, val, len(knots), knots
#ti = interpolate.interp1d(nt, val, kind='cubic')
#print "X", x, np.min(nt),np.max(nt),sp
#print "TI", ti
f_fit = interpolate.splev(x,ti)
elif fitfunc == 'poly':
logger.debug('Selected polynomial fit - amount of data: %d, time steps: %d, degree of fit: %d' % (len(nt), len(val), fitdegree))
ti = polyfit(nt, val, fitdegree)
f_fit = polyval(ti,x)
elif fitfunc == 'mean':
logger.debug('Selected mean fit - amount of data: {}, time steps: {}'.format(len(nt), len(val)))
meanvalue = np.nanmean(val)
meanval = np.asarray([meanvalue for el in val])
ti = polyfit(nt, meanval, 1)
f_fit = polyval(ti,x)
elif fitfunc == 'harmonic':
logger.debug('Selected harmonic fit - using inverse fourier transform')
f_fit = self.harmfit(nt, val, fitdegree)
# Don't use resampled list for harmonic time series
x = nt
elif fitfunc == 'least-squares':
logger.debug('Selected linear least-squares fit')
A = np.vstack([nt, np.ones(len(nt))]).T
m, c, = np.linalg.lstsq(A, val)[0]
f_fit = m * x + c
elif fitfunc == 'none':
logger.debug('Selected no fit')
return
else:
logger.warning('Fit: function not valid')
return
exec('f'+key+' = interpolate.interp1d(x, f_fit, bounds_error=False)')
exec('functionkeylist["f'+key+'"] = f'+key)
#if tok:
func = [functionkeylist, sv, ev]
#else:
# func = [functionkeylist, 0, 0]
return func
def extractflags(self, debug=False):
"""
DEFINITION:
Extracts flags associated with the provided DataStream object
(as obtained by flaggedstream = stream.flag_outlier())
PARAMETERS:
Variables:
None
RETURNS:
- flaglist: (list) a flaglist of type [st,et,key,flagnumber,commentarray[idx],sensorid,now]
EXAMPLE:
>>> flaglist = stream.extractflags()
"""
sensorid = self.header.get('SensorID','')
now = datetime.utcnow()
flaglist = []
flpos = KEYLIST.index('flag')
compos = KEYLIST.index('comment')
flags = self.ndarray[flpos]
comments = self.ndarray[compos]
if not len(flags) > 0 or not len(comments) > 0:
return flaglist
uniqueflags = self.union(flags)
uniquecomments = self.union(comments)
# 1. Extract relevant keys from uniqueflags
if debug:
print ("extractflags: Unique Flags -", uniqueflags)
print ("extractflags: Unique Comments -", uniquecomments)
# zeroflag = ''
keylist = []
for elem in uniqueflags:
if not elem in ['','-']:
#print (elem)
for idx,el in enumerate(elem):
if not el == '-' and el in ['0','1','2','3','4','5','6']:
keylist.append(NUMKEYLIST[idx-1])
# 2. Cycle through keys and extract comments
if not len(keylist) > 0:
return flaglist
keylist = self.union(np.asarray(keylist))
for key in keylist:
indexflag = KEYLIST.index(key)
for comment in uniquecomments:
flagindicies = []
for idx, elem in enumerate(comments):
if not elem == '' and elem == comment:
#print ("ELEM", elem)
flagindicies.append(idx)
# 2. get consecutive groups
for k, g in groupby(enumerate(flagindicies), lambda ix: ix[0] - ix[1]):
try:
consecutives = list(map(itemgetter(1), g))
st = num2date(self.ndarray[0][consecutives[0]]).replace(tzinfo=None)
et = num2date(self.ndarray[0][consecutives[-1]]).replace(tzinfo=None)
flagnumber = flags[consecutives[0]][indexflag]
if not flagnumber in ['-',None]:
flaglist.append([st,et,key,int(flagnumber),comment,sensorid,now])
except:
print ("extractflags: error when extracting flaglist")
return flaglist
def flagfast(self,indexarray,flag, comment,keys=None):
"""
DEFINITION:
Add a flag to specific indices of the stream's ndarray.
PARAMETERS:
Variables:
- keys: (list) Optional: list of keys to mark ['x','y','z']
- flag: (int) 0 ok, 1 remove, 2 force ok, 3 force remove,
4 merged from other instrument
- comment: (str) The reason for flag
- indexarray: (array) indices of the datapoint(s) to mark
RETURNS:
- DataStream: Input stream with flags and comments.
EXAMPLE:
>>> data = data.flagfast([155],'3','Lawnmower',['x','y','z'])
APPLICATION:
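A hedged sketch marking a hypothetical index range in the x column only:
>>> badindices = list(range(155,160))
>>> data = data.flagfast(badindices,1,'spikes',keys=['x'])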
"""
print("Adding flags .... ")
# Define Defaultflag
flagls = [str('-') for elem in FLAGKEYLIST]
defaultflag = ''
# Get new flag
newflagls = []
if not keys:
for idx,key in enumerate(FLAGKEYLIST): # Flag all existing data
if len(self.ndarray[idx]) > 0:
newflagls.append(str(flag))
else:
newflagls.append('-')
newflag = ''.join(newflagls)
else:
for idx,key in enumerate(FLAGKEYLIST): # Only key column
if len(self.ndarray[idx]) > 0 and FLAGKEYLIST[idx] in keys:
newflagls.append(str(flag))
else:
newflagls.append('-')
newflag = ''.join(newflagls)
flagarray, commentarray = [],[]
flagindex = KEYLIST.index('flag')
commentindex = KEYLIST.index('comment')
# create a predefined list
# ########################
# a) get existing flags and comments or create empty lists
if len(self.ndarray[flagindex]) > 0:
flagarray = self.ndarray[flagindex].astype(object)
else:
flagarray = [''] * len(self.ndarray[0])
if len(self.ndarray[commentindex]) > 0:
commentarray = self.ndarray[commentindex].astype(object)
else:
commentarray = [''] * len(self.ndarray[0])
# b) insert new info
for i in indexarray:
flagarray[i] = newflag
commentarray[i] = comment
commentarray = np.asarray(commentarray, dtype='object')
flagarray = np.asarray(flagarray, dtype='object')
flagnum = KEYLIST.index('flag')
commentnum = KEYLIST.index('comment')
self.ndarray[flagnum] = flagarray
self.ndarray[commentnum] = commentarray
#print "... finished"
return self
def flag_range(self, **kwargs):
"""
DEFINITION:
Flags data within time range or data exceeding a certain threshold
Coding : 0 take, 1 remove, 2 force take, 3 force remove
PARAMETERS:
Variables:
- None.
Kwargs:
- keys: (list) List of keys to check for criteria. Default = all numerical
please note: for using the above and below criteria only a single element
needs to be provided (e.g. ['x'])
- text (string) comment
- flagnum (int) Flagid
- keystoflag: (list) List of keys to flag. Default = all numerical
- below: (float) flag data of key below this numerical value.
- above: (float) flag data of key exceeding this numerical value.
- starttime: (datetime Object)
- endtime: (datetime Object)
RETURNS:
- flaglist: (list) flagging information - use stream.flag(flaglist) to add to stream
EXAMPLE:
>>> fllist = stream.flag_range(keys=['x'], above=80)
APPLICATION:
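A minimal sketch - build a flaglist for a threshold and apply it with the flag
method (threshold and comment text are illustrative only):
>>> fllist = stream.flag_range(keys=['f'],above=80000,text='unrealistic intensity',flagnum=3)
>>> stream = stream.flag(fllist)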
"""
keys = kwargs.get('keys')
above = kwargs.get('above')
below = kwargs.get('below')
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
text = kwargs.get('text')
flagnum = kwargs.get('flagnum')
keystoflag = kwargs.get('keystoflag')
numuncert = 0.0000000001 # numerical uncertainty on different machines when using date2num()
sensorid = self.header.get('SensorID')
moddate = datetime.utcnow()
flaglist=[]
if not keystoflag:
keystoflag = self._get_key_headers(numerical=True)
if not flagnum:
flagnum = 0
if not len(self.ndarray[0]) > 0:
print ("flag_range: No data available - aborting")
return flaglist
if not len(keys) == 1:
if above or below:
print ("flag_range: for using thresholds above and below only a single key needs to be provided")
print (" -- ignoring given above and below values")
below = False
above = False
# test validity of starttime and endtime
trimmedstream = self.copy()
if starttime and endtime:
trimmedstream = self._select_timerange(starttime=starttime,endtime=endtime)
trimmedstream = DataStream([LineStruct()],self.header,trimmedstream)
elif starttime:
trimmedstream = self._select_timerange(starttime=starttime)
trimmedstream = DataStream([LineStruct()],self.header,trimmedstream)
elif endtime:
trimmedstream = self._select_timerange(endtime=endtime)
trimmedstream = DataStream([LineStruct()],self.header,trimmedstream)
if not above and not below:
# return flags for all data in trimmed stream
for elem in keystoflag:
flagline = [num2date(trimmedstream.ndarray[0][0]-numuncert).replace(tzinfo=None),num2date(trimmedstream.ndarray[0][-1]-numuncert).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.append(flagline)
return flaglist
if above and below:
# TODO create True/False list and then follow the bin detector example
ind = KEYLIST.index(keys[0])
trueindicies = (trimmedstream.ndarray[ind] > above) & (trimmedstream.ndarray[ind] < below)
d = np.diff(trueindicies)
idx, = d.nonzero()
idx += 1
if not text:
text = 'outside of range {} to {}'.format(below,above)
if trueindicies[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if trueindicies[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, trimmedstream.ndarray[ind].size] # Edit
# Reshape the result into two columns
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
# numerical uncertainty is subtracted from both time steps, as the flagging procedure (findtime) links
# flags to the exact time stamp or, if not found, due to numerical diffs, to the next timestamp
flagline = [num2date(trimmedstream.ndarray[0][start]-numuncert).replace(tzinfo=None),num2date(trimmedstream.ndarray[0][stop]-numuncert).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.append(flagline)
elif above:
# TODO create True/False list and then follow the bin detector example
ind = KEYLIST.index(keys[0])
trueindicies = trimmedstream.ndarray[ind] > above
d = np.diff(trueindicies)
idx, = d.nonzero()
idx += 1
if not text:
text = 'exceeding {}'.format(above)
if trueindicies[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if trueindicies[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, trimmedstream.ndarray[ind].size] # Edit
# Reshape the result into two columns
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
flagline = [num2date(trimmedstream.ndarray[0][start]-numuncert).replace(tzinfo=None),num2date(trimmedstream.ndarray[0][stop]-numuncert).replace(tzinfo=None),elem,int(flagnum),text,sensorid,moddate]
flaglist.append(flagline)
elif below:
# TODO create True/False the other way round
ind = KEYLIST.index(keys[0])
truefalse = trimmedstream.ndarray[ind] < below
d = np.diff(truefalse)
idx, = d.nonzero()
idx += 1
if not text:
text = 'below {}'.format(below)
if truefalse[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if truefalse[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, trimmedstream.ndarray[ind].size] # Edit
# Reshape the result into two columns
idx.shape = (-1,2)
for start,stop in idx:
stop = stop-1
for elem in keystoflag:
flagline = [num2date(trimmedstream.ndarray[0][start]-numuncert).replace(tzinfo=None),num2date(trimmedstream.ndarray[0][stop]-numuncert).replace(tzinfo=None),elem,int(flagnum),str(text),sensorid,moddate]
flaglist.append(flagline)
return flaglist
def flag_outlier(self, **kwargs):
"""
DEFINITION:
Flags outliers in data, using quartiles.
Coding : 0 take, 1 remove, 2 force take, 3 force remove
Example:
0000000, 0001000, etc
012 = take f, automatically removed v, and force use of other
300 = force remove f, take v, and take other
PARAMETERS:
Variables:
- None.
Kwargs:
- keys: (list) List of keys to evaluate. Default = all numerical
- threshold: (float) Determines threshold for outliers.
1.5 = standard
5 = weak condition, keeps storm onsets in (default)
4 = a useful compromise to be used in automatic analysis.
- timerange: (timedelta Object) Time range. Default = samplingrate(sec)*600
- stdout: prints removed values to stdout
- returnflaglist (bool) if True, a flaglist is returned instead of stream
- markall (bool) default is False. If True, all components (provided keys)
are flagged even if outlier is only detected in one. Useful for
vectorial data
RETURNS:
- stream: (DataStream Object) Stream with flagged data.
EXAMPLE:
>>> stream.flag_outlier(keys=['x','y','z'], threshold=2)
APPLICATION:
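A hedged sketch returning a flaglist instead of a flagged stream (window length is
illustrative and should match the data resolution):
>>> flaglist = data.flag_outlier(keys=['x','y','z'],threshold=4,timerange=timedelta(minutes=30),returnflaglist=True)
>>> data = data.flag(flaglist)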
"""
# Defaults:
timerange = kwargs.get('timerange')
threshold = kwargs.get('threshold')
keys = kwargs.get('keys')
markall = kwargs.get('markall')
stdout = kwargs.get('stdout')
returnflaglist = kwargs.get('returnflaglist')
sr = self.samplingrate()
flagtimeprev = 0
startflagtime = 0
numuncert = 0.0000000001 # numerical uncertainty on different machines when using date2num()
if not timerange:
sr = self.samplingrate()
timerange = timedelta(seconds=sr*600)
if not keys:
keys = self._get_key_headers(numerical=True)
if not threshold:
threshold = 5.0
cdate = datetime.utcnow().replace(tzinfo=None)
sensorid = self.header.get('SensorID','')
flaglist = []
# Position of flag in flagstring
# f (intensity): pos 0
# x,y,z (vector): pos 1
# other (vector): pos 2
if not len(self.ndarray[0]) > 0:
logger.info('flag_outlier: No ndarray - starting old remove_outlier method.')
self = self.remove_outlier(keys=keys,threshold=threshold,timerange=timerange,stdout=stdout,markall=markall)
return self
logger.info('flag_outlier: Starting outlier identification...')
flagidx = KEYLIST.index('flag')
commentidx = KEYLIST.index('comment')
if not len(self.ndarray[flagidx]) > 0:
self.ndarray[flagidx] = [''] * len(self.ndarray[0])
else:
self.ndarray[flagidx] = self.ndarray[flagidx].astype(object)
if not len(self.ndarray[commentidx]) > 0:
self.ndarray[commentidx] = [''] * len(self.ndarray[0])
else:
self.ndarray[commentidx] = self.ndarray[commentidx].astype(object)
# get a poslist of all keys - used for markall
flagposls = [FLAGKEYLIST.index(key) for key in keys]
# Start here with for key in keys:
for key in keys:
flagpos = FLAGKEYLIST.index(key)
if not len(self.ndarray[flagpos]) > 0:
print("Flag_outlier: No data for key %s - skipping" % key)
break
print ("-------------------------")
print ("Dealing with key:", key)
st = 0
et = len(self.ndarray[0])
incrt = int(timerange.total_seconds()/sr)
if incrt == 0:
print("Flag_outlier: check timerange ... seems to be smaller as sampling rate")
break
at = incrt
while st < et:
idxst = st
idxat = at
st = at
at += incrt
if idxat > et:
idxat = et
#print key, idxst, idxat
selcol = self.ndarray[flagpos][idxst:idxat].astype(float)
selcol = selcol[~np.isnan(selcol)]
if len(selcol) > 0:
try:
q1 = stats.scoreatpercentile(selcol,16)
q3 = stats.scoreatpercentile(selcol,84)
iqd = q3-q1
md = np.median(selcol)
if iqd == 0:
iqd = 0.000001
whisker = threshold*iqd
#print key, md, iqd, whisker
except:
try:
md = np.median(selcol)
whisker = md*0.005
except:
logger.warning("remove_outlier: Eliminate outliers produced a problem: please check.")
pass
#print md, whisker, np.asarray(selcol)
for elem in range(idxst,idxat):
#print flagpos, elem
if not md-whisker < self.ndarray[flagpos][elem] < md+whisker and not np.isnan(self.ndarray[flagpos][elem]):
#print "Found:", key, self.ndarray[flagpos][elem]
#if key == 'df':
# x = 1/0
try:
if not self.ndarray[flagidx][elem] == '':
#print "Got here", self.ndarray[flagidx][elem]
newflagls = list(self.ndarray[flagidx][elem])
#print newflagls
if newflagls[flagpos] == '-':
newflagls[flagpos] = 0
if not int(newflagls[flagpos]) > 1:
newflagls[flagpos] = '1'
if markall:
for p in flagposls:
if not newflagls[p] > 1:
newflagls[p] = '1'
newflag = ''.join(newflagls)
else:
x=1/0 # Force except
except:
newflagls = []
for idx,el in enumerate(FLAGKEYLIST): # Only key column
if idx == flagpos:
newflagls.append('1')
else:
newflagls.append('-')
if markall:
for p in flagposls:
newflagls[p] = '1'
newflag = ''.join(newflagls)
self.ndarray[flagidx][elem] = newflag
#print self.ndarray[flagidx][elem]
commline = "aof - threshold: {a}, window: {b} sec".format(a=str(threshold), b=str(timerange.total_seconds()))
self.ndarray[commentidx][elem] = commline
infoline = "flag_outlier: at {a} - removed {b} (= {c})".format(a=str(self.ndarray[0][elem]), b=key, c=self.ndarray[flagpos][elem])
logger.info(infoline)
#[starttime,endtime,key,flagid,flagcomment]
flagtime = self.ndarray[0][elem]
if markall:
# if not flagtime and key and commline in flaglist
for fkey in keys:
ls = [flagtime,flagtime,fkey,1,commline]
if not ls in flaglist:
flaglist.append(ls)
else:
flaglist.append([flagtime,flagtime,key,1,commline])
if stdout:
print(infoline)
else:
try:
if not self.ndarray[flagidx][elem] == '':
pass
else:
x=1/0 # Not elegant but working
except:
self.ndarray[flagidx][elem] = ''
self.ndarray[commentidx][elem] = ''
self.ndarray[flagidx] = np.asarray(self.ndarray[flagidx])
self.ndarray[commentidx] = np.asarray(self.ndarray[commentidx])
logger.info('flag_outlier: Outlier flagging finished.')
## METHOD WHICH SORTS/COMBINES THE FLAGLIST
#print("flag_outlier",flaglist)
# Combine subsequent time steps with identical flags to one flag range
newlist = []
srday = sr/(3600.*24.)
# Keep it simple - no cleaning here - just produce new format
if len(flaglist)>0:
#flaglist = sorted(flaglist, key=lambda x: x[0])
for line in flaglist:
newlist.append([num2date(line[0]-numuncert).replace(tzinfo=None),num2date(line[1]-numuncert).replace(tzinfo=None),line[2],line[3],line[4],sensorid,cdate])
else:
newlist = []
#newlist = self.flaglistclean(newlist)
"""
# requires a sorted list
if len(flaglist)>0:
# Different keys are not regarded for here (until 0.4.6)
# 1. Extract all flag for individual keys first
for key in keys:
templist = [l for l in flaglist if l[2] == key]
fllist = sorted(templist, key=lambda x: x[0])
#flaglist = sorted(flaglist, key=lambda x: x[0])
# Startvalue of endtime is firsttime
etprev = fllist[0][1]
prevline = fllist[0]
for line in fllist:
st = line[0]
et = line[1]
diff1 = (et-etprev) # end time diff between current flag and last flag
diff2 = (st-etprev) # diff between current start and last end
srunc = srday+0.01*srday # sampling rate with uncertainty
if diff1 < srunc or diff2 < srunc:
# subsequent time step found -> changing et in line
prevline[1] = et
else:
newlist.append([num2date(prevline[0]).replace(tzinfo=None),num2date(prevline[1]).replace(tzinfo=None),prevline[2],prevline[3],prevline[4],sensorid,cdate])
prevline = line
etprev = et
#save current content of prevline with new et
newlist.append([num2date(prevline[0]).replace(tzinfo=None),num2date(prevline[1]).replace(tzinfo=None),prevline[2],prevline[3],prevline[4],sensorid,cdate])
else:
newlist = []
"""
if returnflaglist:
return newlist
return self
def flag(self, flaglist, removeduplicates=False, debug=False):
"""
DEFINITION:
Apply flaglist to stream. A flaglist typically looks like:
[starttime,endtime,key,flagid,flagcomment]
starttime and endtime are provided as datetime objects
key exists in KEYLIST
flagid is an integer number between 0 and 4
comment is a string of less than 100 characters
PARAMETERS:
- flaglist: (list) as obtained by mpplots plotFlag, database db2flaglist
RETURNS:
- DataStream: flagged version of stream.
EXAMPLE:
>>> flaglist = db.db2flaglist(db,sensorid_data)
>>> data = data.flag(flaglist)
"""
self.progress = 0
# get time range of stream:
st,et = self._find_t_limits()
st = date2num(st)
et = date2num(et)
lenfl = len(flaglist)
logger.info("Flag: Found flaglist of length {}".format(lenfl))
flaglist = [line for line in flaglist if date2num(self._testtime(line[1])) >= st]
flaglist = [line for line in flaglist if date2num(self._testtime(line[0])) <= et]
# Sort flaglist according to startdate (used to speed up flagging procedure)
# BETTER: Sort with input date - otherwise later data might not overwrite earlier...
flaglist = sorted(flaglist, key=lambda x: x[-1])
#flaglist.sort()
## Cleanup flaglist -- remove all inputs with duplicate start and endtime
## (use only last input)
#print("1",flaglist)
def flagclean(flaglist):
## Cleanup flaglist -- remove all inputs with duplicate start and endtime
## (use only last input)
indicies = []
for line in flaglist:
inds = [ind for ind,elem in enumerate(flaglist) if elem[0] == line[0] and elem[1] == line[1] and elem[2] == line[2]]
if len(inds) > 0:
index = inds[-1]
indicies.append(index)
uniqueidx = (list(set(indicies)))
uniqueidx.sort()
#print(uniqueidx)
flaglist = [elem for idx, elem in enumerate(flaglist) if idx in uniqueidx]
return flaglist
if removeduplicates:
flaglist = flagclean(flaglist)
lenfl = len(flaglist)
logger.info("Flag: Relevant flags: {}".format(lenfl))
## Determining sampling rate for nearby flagging
sr = self.samplingrate()
if lenfl > 0:
for i in range(lenfl):
self.progress = (float(i)/float(lenfl)*100.)
if removeduplicates or debug or lenfl > 100:
if i == int(lenfl/5.):
print("Flag: 20 percent done")
if i == int(lenfl/5.*2.):
print("Flag: 40 percent done")
if i == int(lenfl/5.*3.):
print("Flag: 60 percent done")
if i == int(lenfl/5.*4.):
print("Flag: 80 percent done")
fs = date2num(self._testtime(flaglist[i][0]))
fe = date2num(self._testtime(flaglist[i][1]))
if st < fs and et < fs and st < fe and et < fe:
pass
elif st > fs and et > fs and st > fe and et > fe:
pass
else:
valid_chars='-_.() abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
flaglist[i][4] = ''.join([e for e in list(flaglist[i][4]) if e in list(valid_chars)])
keys = flaglist[i][2].split('_')
for key in keys:
self = self.flag_stream(key,int(flaglist[i][3]),flaglist[i][4],flaglist[i][0],flaglist[i][1],samplingrate = sr,debug=debug)
return self
def flagliststats(self,flaglist, intensive=False, output='stdout'):
"""
DESCRIPTION:
Provides some information on flag statistics
PARAMETER:
flaglist (list) flaglist to be investigated
APPLICATION:
flaglist = db2flaglist(db,'all')
self.flagliststats(flaglist)
"""
amountlist = []
outputt = '##########################################\n'
outputt += ' Flaglist statistics \n'
outputt += '##########################################\n'
outputt += '\n'
outputt += 'A) Total contents: {}\n'.format(len(flaglist))
outputt += '\n'
outputt += 'B) Content for each ID:\n'
#print (flaglist[0], len(flaglist[0]))
if len(flaglist[0]) > 6:
ids = [el[5] for el in flaglist]
uniquenames = list(set(ids))
for name in uniquenames:
amount = len([el[0] for el in flaglist if el[5] == name])
amountlist.append([name,amount])
if intensive:
flagli = [el for el in flaglist if el[5] == name]
index = [el[3] for el in flagli]
uniqueindicies = list(set(index))
reasons = [el[4] for el in flagli]
uniquereasons = list(set(reasons))
intensiveinfo = []
for reason in uniquereasons:
num = len([el for el in flagli if reason == el[4]])
intensiveinfo.append([reason,num])
intensiveinfo = sorted(intensiveinfo,key=lambda x: x[1])
intensiveinfo = ["{} : {}\n".format(e[0],e[1]) for e in intensiveinfo]
amountlist[-1].append(intensiveinfo)
amountlist = sorted(amountlist,key=lambda x: x[1])
for el in amountlist:
outputt += "Dataset: {} \t Amount: {}\n".format(el[0],el[1])
if intensive:
for ele in el[2]:
outputt += " {}".format(ele)
if output=='stdout':
print (outputt)
return outputt
def flaglistclean(self,flaglist,progress=False):
"""
DESCRIPTION:
identify and remove duplicates from flaglist, only the latest inputs are used
start, endtime and key are used to identify duplicates
PARAMETER:
flaglist (list) flaglist to be investigated
APPLICATION:
stream = DataStream()
flaglist = db2flaglist(db,'all')
flaglistwithoutduplicates = stream.flaglistclean(flaglist)
"""
# first step - remove all duplicates
testflaglist = ['____'.join([str(date2num(elem[0])),str(date2num(elem[1])),str(elem[2]),str(elem[3]),str(elem[4]),str(elem[5]),str(date2num(elem[6]))]) for elem in flaglist]
uniques,indi = np.unique(testflaglist,return_index=True)
flaglist = [flaglist[idx] for idx in indi]
# second step - remove all inputs without components
flaglist = [elem for elem in flaglist if not elem[2] == '']
## Cleanup flaglist -- remove all inputs with duplicate start and endtime
## (use only last input)
indicies = []
for ti, line in enumerate(flaglist):
if progress and ti/1000. == np.round(ti/1000.):
print ("Current state: {} percent".format(ti/len(flaglist)*100))
if len(line) > 5:
inds = [ind for ind,elem in enumerate(flaglist) if elem[0] == line[0] and elem[1] == line[1] and elem[2] == line[2] and elem[5] == line[5]]
else:
inds = [ind for ind,elem in enumerate(flaglist) if elem[0] == line[0] and elem[1] == line[1] and elem[2] == line[2]]
if len(inds) > 1:
# get inputs dates for all duplicates and select the latest
dates = [[flaglist[dupind][-1], dupind] for dupind in inds]
indicies.append(sorted(dates)[-1][1])
else:
index = inds[-1]
indicies.append(index)
uniqueidx = (list(set(indicies)))
print ("flaglistclean: found {} unique inputs".format(len(uniqueidx)))
uniqueidx.sort()
flaglist = [flaglist[idx] for idx in uniqueidx]
return flaglist
def stream2flaglist(self, userange=True, flagnumber=None, keystoflag=None, sensorid=None, comment=None):
"""
DESCRIPTION:
Constructs a flaglist input dependent on the content of stream
PARAMETER:
comment (key or string) if a key (or comma separated list of keys) is
found, then the content of this column is used (first input in case of userange=True)
flagnumber (int) integer number between 0 and 4
userange (bool) if False, each stream line results in a flag,
if True the full time range is marked
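APPLICATION:
A minimal sketch (sensorid and comment text are illustrative only):
>>> fl = stream.stream2flaglist(flagnumber=3,keystoflag=['x','y','z'],sensorid='Example_1234_0001',comment='preliminary data')
>>> stream = stream.flag(fl)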
"""
### identify any given gaps and flag time ranges regarding gaps
if not comment:
print("stream2flag: you need to provide either a key or a text comment. (e.g. 'str1,str2' or 'Flagged'")
return []
if not flagnumber:
flagnumber = 0
if not keystoflag:
print("stream2flag: you need to provide a list of keys to which you apply the flags (e.g. ['x','z']")
return []
if not sensorid:
print("stream2flag: you need to provide a sensorid")
return []
commentarray = np.asarray([])
uselist = False
if comment in KEYLIST:
pos = KEYLIST.index(comment)
if userange:
comment = self.ndarray[pos][0]
else:
uselist = True
commentarray = self.ndarray[pos]
else:
lst,poslst = [],[]
commentlist = comment.split(',')
try:
for commkey in commentlist:
if commkey in KEYLIST:
#print(commkey)
pos = KEYLIST.index(commkey)
if userange:
lst.append(str(self.ndarray[pos][0]))
else:
poslst.append(pos)
else:
# Throw exception
x= 1/0
if userange:
comment = ' : '.join(lst)
else:
uselist = True
resultarray = []
for pos in poslst:
resultarray.append(self.ndarray[pos])
resultarray = np.transpose(np.asarray(resultarray))
commentarray = [''.join(str(lst)) for lst in resultarray]
except:
#comment remains unchanged
pass
now = datetime.utcnow()
res = []
if userange:
st = np.min(self.ndarray[0])
et = np.max(self.ndarray[0])
st = num2date(float(st)).replace(tzinfo=None)
et = num2date(float(et)).replace(tzinfo=None)
for key in keystoflag:
res.append([st,et,key,flagnumber,comment,sensorid,now])
else:
for idx,st in enumerate(self.ndarray[0]):
for key in keystoflag:
st = num2date(float(st)).replace(tzinfo=None)
if uselist:
res.append([st,st,key,flagnumber,commentarray[idx],sensorid,now])
else:
res.append([st,st,key,flagnumber,comment,sensorid,now])
return res
def flaglistmod(self, mode='select', flaglist=[], parameter='key', value=None, newvalue=None, starttime=None, endtime=None):
"""
DEFINITION:
Select/Replace/Delete information in flaglist
selection parameters are key, flagnumber, comment and sensorid; optional starttime and endtime restrict the range
mode delete: if only starttime and endtime are provided then all data in between is removed;
if parameter and value are provided this data is removed, optionally
only between starttime and endtime
APPLICATION:
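A hedged sketch (flaglist 'fl' e.g. as obtained from db2flaglist; values are illustrative):
>>> flx = stream.flaglistmod(mode='select',flaglist=fl,parameter='key',value='x')
>>> fl = stream.flaglistmod(mode='delete',flaglist=fl,parameter='comment',value='lightning')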
"""
num = 0
# convert start and end to correct format
if parameter == 'key':
num = 2
elif parameter == 'flagnumber':
num = 3
elif parameter == 'comment':
num = 4
elif parameter == 'sensorid':
num = 5
if mode in ['select','replace'] or (mode=='delete' and value):
if starttime:
starttime = self._testtime(starttime)
flaglist = [elem for elem in flaglist if elem[1] > starttime]
if endtime:
endtime = self._testtime(endtime)
flaglist = [elem for elem in flaglist if elem[0] < endtime]
elif mode == 'delete' and not value:
print ("Only deleting")
flaglist1, flaglist2 = [],[]
if starttime:
starttime = self._testtime(starttime)
flaglist1 = [elem for elem in flaglist if elem[1] < starttime]
if endtime:
endtime = self._testtime(endtime)
flaglist2 = [elem for elem in flaglist if elem[0] > endtime]
flaglist1.extend(flaglist2)
flaglist = flaglist1
if mode == 'select':
if num>0 and value:
if num == 4:
flaglist = [elem for elem in flaglist if elem[num].find(value) > 0]
elif num == 3:
flaglist = [elem for elem in flaglist if elem[num] == int(value)]
else:
flaglist = [elem for elem in flaglist if elem[num] == value]
elif mode == 'replace':
if num>0 and value:
for idx, elem in enumerate(flaglist):
if num == 4:
if elem[num].find(value) >= 0:
flaglist[idx][num] = newvalue
elif num == 3:
if elem[num] == int(value):
flaglist[idx][num] = int(newvalue)
else:
if elem[num] == value:
flaglist[idx][num] = newvalue
elif mode == 'delete':
if num>0 and value:
if num == 4:
flaglist = [elem for elem in flaglist if elem[num].find(value) < 0]
elif num == 3:
flaglist = [elem for elem in flaglist if not elem[num] == int(value)]
else:
flaglist = [elem for elem in flaglist if not elem[num] == value]
return flaglist
def flaglistadd(self, flaglist, sensorid, keys, flagnumber, comment, startdate, enddate=None):
"""
DEFINITION:
Add a specific input to a flaglist
Flaglist elements look like
[st,et,key,flagnumber,comment,sensorid,now]
APPLICATION:
newflaglist = stream.flaglistadd(oldflaglist,sensorid, keys, flagnumber, comment, startdate, enddate)
"""
# convert start and end to correct format
st = self._testtime(startdate)
if enddate:
et = self._testtime(enddate)
else:
et = st
now = datetime.utcnow()
if keys in ['all','All','ALL']:
keys = KEYLIST
for key in keys:
flagelem = [st,et,key,flagnumber,comment,sensorid,now]
exists = [elem for elem in flaglist if elem[:5] == flagelem[:5]]
if len(exists) == 0:
flaglist.append(flagelem)
else:
print ("flaglistadd: Flag already exists")
return flaglist
def flag_stream(self, key, flag, comment, startdate, enddate=None, samplingrate=0., debug=False):
"""
DEFINITION:
Add flags to specific times or time ranges (if enddate is provided).
PARAMETERS:
Variables:
- key: (str) Column to apply flag to, e.g. 'x'
- flag: (int) 0 ok, 1 remove, 2 force ok, 3 force remove,
4 merged from other instrument
- comment: (str) The reason for flag
- startdate: (datetime object) the date of the (first) datapoint to remove
Kwargs:
- enddate: (datetime object) the enddate of a time range to be flagged
- samplingrate: (float) in seconds, needs to be provided for effective nearby search
RETURNS:
- DataStream: Input stream with flags and comments.
EXAMPLE:
>>> data = data.flag_stream('x',0,'Lawnmower',flag1,flag1_end)
APPLICATION:
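A minimal sketch assuming one-second data (dates and comment are placeholders):
>>> data = data.flag_stream('f', 1, 'lightning disturbance', '2018-08-01T10:00:00', '2018-08-01T10:02:00', samplingrate=1.)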
"""
# TODO:
# make flag_stream accept key lists -> much faster for multiple column data
sr = samplingrate
if not key in KEYLIST:
logger.error("flag_stream: %s is not a valid key." % key)
return self
if not flag in [0,1,2,3,4]:
logger.error("flag_stream: %s is not a valid flag." % flag)
return self
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype = True
elif not len(self) > 0:
return DataStream()
startdate = self._testtime(startdate)
if not enddate:
# Set enddate to startdate
# Hereby flag nearest might be used later
enddate = startdate
"""
start = date2num(startdate)
check_startdate, val = self.findtime(start)
if check_startdate == 0:
logger.info("flag_stream: No data at given date for flag. Finding nearest data point.")
if ndtype:
time = self.ndarray[0]
else:
time = self._get_column('time')
#print start, len(time)
new_endtime, index = find_nearest(time, start)
if new_endtime > start:
startdate = num2date(start)
enddate = num2date(new_endtime)
else:
startdate = num2date(new_endtime)
enddate = num2date(start)
else:
enddate = startdate
"""
else:
enddate = self._testtime(enddate)
### ######## IF STARTDATE == ENDDATE
### MODIFYED TO STARTDATE-Samplingrate/3, ENDDATE + Samplingrate/3
### Taking 1/3 is arbitrary.
### This helps to apply flagging info to any higher resolution record
### which does not contain the exact time stamp.
### You are likely to exclude more data than necessary.
### Flag the high resolution data set to avoid that.
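# Worked example (illustrative): with samplingrate = 60 s and startdate == enddate,
# rangeExtend widens the flag window to [startdate - 20 s, enddate + 20 s].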
def rangeExtend(startdate,enddate,samplingrate,divisor=3):
if startdate == enddate:
startdate = startdate-timedelta(seconds=samplingrate/divisor)
enddate = enddate+timedelta(seconds=samplingrate/divisor)
start = date2num(startdate)
end = date2num(enddate)
return start,end
else:
start = date2num(startdate)
end = date2num(enddate)
return start,end
pos = FLAGKEYLIST.index(key)
if debug:
print("flag_stream: Flag",startdate, enddate)
start = date2num(startdate)
end = date2num(enddate)
mint = np.min(self.ndarray[0])
maxt = np.max(self.ndarray[0])
if start < mint and end < mint:
st = 0
ed = 0
elif start > maxt and end > maxt:
st = 0
ed = 0
else:
### Modified to use nearest value to be flagged if flagtimes
### overlap with streams timerange
### find_nearest is probably very slowly...
### Using startidx values to speed up the process at least for later data
# Get start and end indicies:
if debug:
ti1 = datetime.utcnow()
st, ls = self.findtime(startdate,mode='argmax')
# st is the index of the start time -- a future modification could allow providing a key list
if debug:
ti2 = datetime.utcnow()
print ("flag_stream: findtime duration", ti2-ti1)
#if debug:
# ti1 = datetime.utcnow()
# testls = nonzero(self.ndarray[0]==startdate)
# ti2 = datetime.utcnow()
# print ("Findtime duration -alternative", ti2-ti1)
if st == 0:
#print("Flag_stream: slowly start",st)
if not sr == 0:
# Determine sampling rate if not done yet
start,end = rangeExtend(startdate,enddate,sr)
ls,st = find_nearest(self.ndarray[0],start)
sti = st-2
if sti < 0:
sti = 0
ed, le = self.findtime(enddate,startidx=sti,mode='argmax')
if ed == 0:
#print("Flag_stream: slowly end",ed)
if not sr == 0:
# Determine sampling rate if not done yet
start,end = rangeExtend(startdate,enddate,sr)
le, ed = find_nearest(self.ndarray[0],end) ### TODO use startidx here as well
if ed == len(self.ndarray[0]):
ed = ed-1
# Create a defaultflag
defaultflag = ['-' for el in FLAGKEYLIST]
if debug:
ti3 = datetime.utcnow()
print ("Full Findtime duration", ti3-ti1)
print("flagging", st, ed)
if ndtype:
array = [[] for el in KEYLIST]
flagind = KEYLIST.index('flag')
commentind = KEYLIST.index('comment')
# Check whether flag and comment columns exist - if not create empty ones
if not len(self.ndarray[flagind]) > 0:
array[flagind] = [''] * len(self.ndarray[0])
else:
array[flagind] = list(self.ndarray[flagind])
if not len(self.ndarray[commentind]) > 0:
array[commentind] = [''] * len(self.ndarray[0])
else:
array[commentind] = list(self.ndarray[commentind])
# Now either modify existing or add new flag
if st==0 and ed==0:
pass
else:
t3a = datetime.utcnow()
for i in range(st,ed+1):
#if self.ndarray[flagind][i] == '' or self.ndarray[flagind][i] == '-':
if array[flagind][i] == '' or array[flagind][i] == '-':
flagls = defaultflag
else:
flagls = list(array[flagind][i])
# if the existing flag string is shorter, because new columns were added later to ndarray
if len(flagls) <= pos:
flagls.extend(['-' for j in range(pos+1-len(flagls))])
flagls[pos] = str(flag)
array[flagind][i] = ''.join(flagls)
array[commentind][i] = comment
self.ndarray[flagind] = np.array(array[flagind], dtype=object)
self.ndarray[commentind] = np.array(array[commentind], dtype=object)
# up to 0.3.98 the following code was used (~10 times slower)
# further significant speed up requires some structural changes:
# 1. use keylist here
#self.ndarray[flagind] = np.asarray(array[flagind]).astype(object)
#self.ndarray[commentind] = np.asarray(array[commentind]).astype(object)
else:
for elem in self:
if elem.time >= start and elem.time <= end:
fllist = list(elem.flag)
if not len(fllist) > 1:
fllist = defaultflag
fllist[pos] = str(flag)
elem.flag=''.join(fllist)
elem.comment = comment
if flag == 1 or (flag == 3 and debug):
if enddate:
#print ("flag_stream: Flagged data from %s to %s -> (%s)" % (startdate.isoformat(),enddate.isoformat(),comment))
try:
logger.info("flag_stream: Flagged data from %s to %s -> (%s)" % (startdate.isoformat().encode('ascii','ignore'),enddate.isoformat().encode('ascii','ignore'),comment.encode('ascii','ignore')))
except:
pass
else:
try:
logger.info("flag_stream: Flagged data at %s -> (%s)" % (startdate.isoformat().encode('ascii','ignore'),comment.encode('ascii','ignore')))
except:
pass
return self
def simplebasevalue2stream(self,basevalue,**kwargs):
"""
DESCRIPTION:
simple basevalue correction using a basevalue list
PARAMETERS:
basevalue (list): [baseH,baseD,baseZ]
keys (list): default = 'x','y','z'
APPLICATION:
used by stream.baseline
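A minimal sketch (the basevalue entries [baseH, baseD, baseZ] below are placeholders):
>>> corrstream = variostream.simplebasevalue2stream([21300.0, 1.8, 43400.0])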
"""
mode = kwargs.get('mode')
keys = ['x','y','z']
# Changed that - 49 sec before, now less than 2 secs
if not len(self.ndarray[0]) > 0:
print("simplebasevalue2stream: requires ndarray")
return self
#1. calculate function value for each data time step
array = [[] for key in KEYLIST]
array[0] = self.ndarray[0]
# get x array for baseline
#indx = KEYLIST.index('x')
for key in KEYLIST:
ind = KEYLIST.index(key)
if key in keys: # new
#print keys.index(key)
ar = self.ndarray[ind].astype(float)
if key == 'y':
#indx = KEYLIST.index('x')
#Hv + Hb; Db + atan2(y,H_corr) Zb + Zv
#print type(self.ndarray[ind]), key, self.ndarray[ind]
array[ind] = np.arctan2(np.asarray(list(ar)),np.asarray(list(arrayx)))*180./np.pi + basevalue[keys.index(key)]
self.header['col-y'] = 'd'
self.header['unit-col-y'] = 'deg'
else:
array[ind] = ar + basevalue[keys.index(key)]
if key == 'x': # remember this for correct y determination
arrayx = array[ind]
else: # new
if len(self.ndarray[ind]) > 0:
array[ind] = self.ndarray[ind].astype(object)
self.header['DataComponents'] = 'HDZ'
return DataStream(self,self.header,np.asarray(array))
def func2stream(self,funclist,**kwargs):
"""
DESCRIPTION:
combine data stream and functions obtained by fitting and interpolation. Possible combination
modes are 'add' (default), subtract 'sub', divide 'div' and 'multiply'. Furthermore, the
function values can replace the original values at the given timesteps of the stream
PARAMETERS:
funclist (list of functions): required - each function is an output of stream.fit or stream.interpol
#function (function): required - output of stream.fit or stream.interpol
keys (list): default = 'x','y','z'
mode (string): one of 'add','sub','div','multiply','values' - default = 'add'
APPLICATION:
used by stream.baseline
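A hedged sketch; the harmonic fit parameters below are illustrative only:
>>> func = stream.fit(['x','y','z'], fitfunc='harmonic', fitdegree=5)
>>> reduced = stream.func2stream(func, keys=['x','y','z'], mode='sub')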
"""
keys = kwargs.get('keys')
fkeys = kwargs.get('fkeys')
mode = kwargs.get('mode')
if not keys:
keys = ['x','y','z']
if not mode:
mode = 'add'
if fkeys and not len(fkeys) == len(keys):
fkeys=None
logger.warning("func2stream: provided fkeys do not match keys")
if isinstance(funclist[0], dict):
funct = [funclist]
else:
funct = funclist # TODO: cycle through list
totalarray = [[] for key in KEYLIST]
posstr = KEYLIST.index('str1')
testx = []
for function in funct:
#print ("Testing", function)
if not function:
return self
# Changed that - 49 sec before, now less than 2 secs
if not len(self.ndarray[0]) > 0:
print("func2stream: requires ndarray - trying old LineStruct functions")
if mode == 'add':
return self.func_add(function, keys=keys)
elif mode == 'sub':
return self.func_subtract(function, keys=keys)
else:
return self
#1. calculate function value for each data time step
array = [[] for key in KEYLIST]
array[0] = self.ndarray[0]
dis_done = False
# get x array for baseline
#indx = KEYLIST.index('x')
#arrayx = self.ndarray[indx].astype(float)
functimearray = (self.ndarray[0].astype(float)-function[1])/(function[2]-function[1])
for key in KEYLIST:
validkey = False
ind = KEYLIST.index(key)
if key in keys: # new
#print ("DEALING: ", key)
keyind = keys.index(key)
if fkeys:
fkey = fkeys[keyind]
else:
fkey = key
ar = np.asarray(self.ndarray[ind]).astype(float)
try:
test = function[0]['f'+fkey](functimearray)
validkey = True
except:
pass
if mode == 'add' and validkey:
print ("here", ar, function[0]['f'+fkey](functimearray))
array[ind] = ar + function[0]['f'+fkey](functimearray)
elif mode == 'addbaseline' and validkey:
if key == 'y':
#indx = KEYLIST.index('x')
#Hv + Hb; Db + atan2(y,H_corr) Zb + Zv
#print type(self.ndarray[ind]), key, self.ndarray[ind]
array[ind] = np.arctan2(np.asarray(list(ar)),np.asarray(list(arrayx)))*180./np.pi + function[0]['f'+fkey](functimearray)
self.header['col-y'] = 'd'
self.header['unit-col-y'] = 'deg'
else:
#print("func2stream", function, function[0], function[0]['f'+key],functimearray)
array[ind] = ar + function[0]['f'+fkey](functimearray)
if len(array[posstr]) == 0:
#print ("Assigned values to str1: function {}".format(function[1]))
array[posstr] = ['c']*len(ar)
if len(testx) > 0 and not dis_done:
# identify change from number to nan
# add discontinuity marker there
#print ("Here", testx)
prevel = np.nan
for idx, el in enumerate(testx):
if not np.isnan(prevel) and np.isnan(el):
array[posstr][idx] = 'd'
#print ("Modified str1 at {}".format(idx))
break
prevel = el
dis_done = True
if key == 'x': # remember this for correct y determination
arrayx = array[ind]
testx = function[0]['f'+fkey](functimearray)
if key == 'dx': # use this column to test if delta values are already provided
testx = function[0]['f'+fkey](functimearray)
elif mode in ['sub','subtract'] and validkey:
array[ind] = ar - function[0]['f'+fkey](functimearray)
elif mode == 'values' and validkey:
array[ind] = function[0]['f'+fkey](functimearray)
elif mode == 'div' and validkey:
array[ind] = ar / function[0]['f'+fkey](functimearray)
elif mode == 'multiply' and validkey:
array[ind] = ar * function[0]['f'+fkey](functimearray)
elif validkey:
print("func2stream: mode not recognized")
else: # new
if len(self.ndarray[ind]) > 0:
array[ind] = np.asarray(self.ndarray[ind]).astype(object)
for idx, col in enumerate(array):
if len(totalarray[idx]) > 0 and not idx == 0:
totalcol = totalarray[idx]
for j,el in enumerate(col):
if idx < len(NUMKEYLIST)+1 and not np.isnan(el) and np.isnan(totalcol[j]):
totalarray[idx][j] = array[idx][j]
if idx > len(NUMKEYLIST) and not el == 'c' and totalcol[j] == 'c':
totalarray[idx][j] = 'd'
else:
totalarray[idx] = array[idx]
return DataStream(self,self.header,np.asarray(totalarray,dtype=object))
def func_add(self,funclist,**kwargs):
"""
Add a function to the selected values of the data stream -> e.g. get baseline
Optional:
keys (default = 'x','y','z')
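A minimal sketch (func obtained e.g. from stream.fit or stream.interpol):
>>> baselined = stream.func_add(func, keys=['x','y','z'])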
"""
keys = kwargs.get('keys')
mode = kwargs.get('mode')
if not keys:
keys = ['x','y','z']
if not mode:
mode = 'add'
if isinstance(funclist[0], dict):
funct = [funclist]
else:
funct = funclist
function = funct[0] # Direct call of old version only accepts single function
# Changed that - 49 sec before, now less than 2 secs
if len(self.ndarray[0]) > 0:
#1. calculate function value for each data time step
array = [[] for key in KEYLIST]
array[0] = self.ndarray[0]
functimearray = (self.ndarray[0].astype(float)-function[1])/(function[2]-function[1])
#print functimearray
for key in keys:
ind = KEYLIST.index(key)
if mode == 'add':
array[ind] = self.ndarray[ind] + function[0]['f'+key](functimearray)
elif mode == 'sub':
array[ind] = self.ndarray[ind] - function[0]['f'+key](functimearray)
elif mode == 'values':
array[ind] = function[0]['f'+key](functimearray)
elif mode == 'div':
array[ind] = self.ndarray[ind] / function[0]['f'+key](functimearray)
elif mode == 'multiply':
array[ind] = self.ndarray[ind] * function[0]['f'+key](functimearray)
else:
print("func2stream: mode not recognized")
return DataStream(self,self.header,np.asarray(array,dtype=object))
for elem in self:
# check whether time step is in function range
if function[1] <= elem.time <= function[2]:
functime = (elem.time-function[1])/(function[2]-function[1])
for key in keys:
if not key in KEYLIST[1:16]:
raise ValueError("Column key not valid")
fkey = 'f'+key
keyval = getattr(elem, key)
if fkey in function[0] and not isnan(keyval):
try:
newval = keyval + function[0][fkey](functime)
except:
newval = float('nan')
setattr(elem, key, newval)
else:
pass
else:
pass
return self
def func_subtract(self,funclist,**kwargs):
"""
Subtract a function from the selected values of the data stream -> e.g. obtain Residuals
Optional:
keys (default = 'x','y','z')
:type order int
:param order : 0 -> stream - function; 1 -> function - stream
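A minimal sketch (func obtained e.g. from stream.fit or stream.interpol):
>>> residuals = stream.func_subtract(func, keys=['x','y','z'], order=0)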
"""
keys = kwargs.get('keys')
order = kwargs.get('order')
st = DataStream()
st = self.copy()
if isinstance(funclist[0], dict):
funct = [funclist]
else:
funct = funclist
function = funct[0] # Direct call of old version only accepts single function
"""
for el in self:
li = LineStruct()
li.time = el.time
li.x = el.x
li.y = el.y
li.z = el.z
st.add(li)
"""
if not order:
order = 0
if not keys:
keys = ['x','y','z']
for elem in st:
# check whether time step is in function range
if function[1] <= elem.time <= function[2]:
functime = (elem.time-function[1])/(function[2]-function[1])
for key in keys:
if not key in KEYLIST[1:16]:
raise ValueError("Column key not valid")
fkey = 'f'+key
keyval = getattr(elem, key)
if fkey in function[0] and not isnan(keyval):
try:
if order == 0:
newval = keyval - function[0][fkey](functime)
else:
newval = function[0][fkey](functime) - keyval
except:
newval = float('nan')
setattr(elem, key, newval)
else:
pass
else:
pass
return st
def func2header(self,funclist,debug=False):
"""
DESCRIPTION
Add a list of functions into the data header
"""
if isinstance(funclist[0], dict):
funct = [funclist]
else:
funct = funclist
self.header['DataFunctionObject'] = funct
return self
def GetKeyName(self,key):
"""
DESCRIPTION
get the content name of a specific key
will scan header information until successful:
(1) col-"key" names
(2) ColumnContent header info
(3) SensorElements header info
if no Name for the key is found, then the key itself is returned
APPLICATION:
element = datastream.GetKeyName('var1')
"""
if not key in KEYLIST:
print ("key not in KEYLIST - aborting")
return ''
element = ''
# One
try:
element = self.header.get("col-{}".format(key))
if not element == '':
return element
except:
pass
# Two
try:
element = self.header.get('ColumnContents','').split(',')[KEYLIST.index(key)]
if not element == '':
return element
except:
pass
# Three
try:
idx = self.header.get('SensorKeys','').split(',').index(key)
element = self.header.get('SensorElements','').split(',')[idx]
if not element == '':
return element
except:
pass
return key
def GetKeyUnit(self,key):
"""
DESCRIPTION
get the unit of a specific key
will scan header information until successful:
(1) unit-col-"key" names
(2) ColumnUnit header info
if no unit for the key is found, then an empty string is returned
APPLICATION:
unit = datastream.GetKeyUnit('var1')
"""
if not key in KEYLIST:
print ("key not in KEYLIST - aborting")
return ''
unit = ''
# One
try:
unit = self.header.get("unit-col-{}".format(key))
if not unit == '':
return unit
except:
pass
# Two
try:
unit = self.header.get('ColumnUnits','').split(',')[KEYLIST.index(key)]
if not unit == '':
return unit
except:
pass
return unit
def get_gaps(self, **kwargs):
"""
DEFINITION:
Takes the dominant sample frequency and fills nan into non-existing time steps:
This function provides the basis for discontinuous plots and gap analysis and proper filtering.
PARAMETERS:
Variables:
---
Kwargs:
- accuracy: (float) time relative to a day - default 1 sec
- gapvariable: (string) - referring to stream column - default='var5' - This column
is overwritten with 0 (data) and 1 (no data).
- key: (string) - referring to a data column e.g. key='x'. If given then all NaN values with existing time steps are also marked by '1' in the gapvariable line for this key
RETURNS:
- stream: (Datastream)
EXAMPLE:
>>> stream_with_gaps_filled = stream_with_gaps.get_gaps(key='f')
APPLICATION:
used by nfilter() for correct filtering
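A hedged sketch with an explicit accuracy of one second, expressed as a fraction of a day:
>>> filled = stream.get_gaps(accuracy=1.0/(3600.0*24.0))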
CHANGES:
Last updated and tested with nfilter function by leon 2014-07-22
"""
accuracy = kwargs.get('accuracy')
key = kwargs.get('key')
gapvariable = kwargs.get('gapvariable')
debug = kwargs.get('debug')
if key in KEYLIST:
gapvariable = True
if not gapvariable:
gapvariable = 'var5'
if not self.length()[0] > 1:
print ("get_gaps: Stream does not contain data - aborting")
return self
# Better use get_sampling_period as samplingrate is rounded
#spr = self.get_sampling_period()
#newsps = newsp*3600.0*24.0
newsps = self.samplingrate()
newsp = newsps/3600.0/24.0
if not accuracy:
accuracy = 0.9/(3600.0*24.0) # one second relative to day
accuracy = 0.05*newsp # 5 percent of samplingrate
if newsps < 0.9 and not accuracy:
accuracy = (newsps-(newsps*0.1))/(3600.0*24.0)
logger.info('--- Starting filling gaps with NANs at %s ' % (str(datetime.now())))
stream = self.copy()
prevtime = 0
ndtype = False
if len(stream.ndarray[0]) > 0:
maxtime = stream.ndarray[0][-1]
mintime = stream.ndarray[0][0]
length = len(stream.ndarray[0])
sourcetime = stream.ndarray[0]
ndtype = True
else:
mintime = self[0].time
maxtime = self[-1].time
if debug:
print("Time range:", mintime, maxtime)
print("Length, samp_per and accuracy:", self.length()[0], newsps, accuracy)
shift = 0
if ndtype:
# Get time diff and expected count
timediff = maxtime - mintime
expN = int(round(timediff/newsp))+1
if debug:
print("Expected length vs actual length:", expN, length)
if expN == len(sourcetime):
# Found the expected amount of time steps - no gaps
logger.info("get_gaps: No gaps found - Returning")
return stream
else:
# correct way (will be used by default) - does not use any accuracy value
#projtime = np.linspace(mintime, maxtime, num=expN, endpoint=True)
#print("proj:", projtime, len(projtime))
# find values or projtime, which are not in sourcetime
#dif = setdiff1d(projtime,sourcetime, assume_unique=True)
#print (dif, len(dif))
#print (len(dif),len(sourcetime),len(projtime))
diff = sourcetime[1:] - sourcetime[:-1]
num_fills = np.round(diff / newsp) - 1
getdiffids = np.where(diff > newsp+accuracy)[0]
logger.info("get_gaps: Found gaps - Filling nans to them")
if debug:
print ("Here", diff, num_fills, newsp, getdiffids)
missingt = []
# Get critical differences and number of missing steps
for i in getdiffids:
#print (i, sourcetime[i-1], sourcetime[i], sourcetime[i+1])
nf = num_fills[i]
# if nf is larger than zero then append the missing time steps to the missingt list
if nf > 0:
for n in range(int(nf)): # add n+1 * samplingrate for each missing value
missingt.append(sourcetime[i]+(n+1)*newsp)
print ("Filling {} gaps".format(len(missingt)))
# Cycle through stream and append nans to each column for missing time steps
nans = [np.nan] * len(missingt)
empts = [''] * len(missingt)
gaps = [0.0] * len(missingt)
for idx,elem in enumerate(stream.ndarray):
if idx == 0:
# append missingt list to array element
elem = list(elem)
lenelem = len(elem)
elem.extend(missingt)
stream.ndarray[idx] = np.asarray(elem).astype(object)
elif len(elem) > 0:
# append nans list to array element
elem = list(elem)
if KEYLIST[idx] in NUMKEYLIST or KEYLIST[idx] == 'sectime':
elem.extend(nans)
else:
elem.extend(empts)
stream.ndarray[idx] = np.asarray(elem).astype(object)
elif KEYLIST[idx] == gapvariable:
# append nans list to array element
elem = [1.0]*lenelem
elem.extend(gaps)
stream.ndarray[idx] = np.asarray(elem).astype(object)
return stream.sorting()
else:
stream = DataStream()
for elem in self:
if abs((prevtime+newsp) - elem.time) > accuracy and not prevtime == 0:
currtime = num2date(prevtime)+timedelta(seconds=newsps)
while currtime <= num2date(elem.time):
newline = LineStruct()
exec('newline.'+gapvariable+' = 1.0')
newline.time = date2num(currtime)
stream.add(newline)
currtime += timedelta(seconds=newsps)
else:
exec('elem.'+gapvariable+' = 0.0')
if key in KEYLIST:
if isnan(eval('elem.'+key)):
exec('elem.'+gapvariable+' = 1.0')
stream.add(elem)
prevtime = elem.time
logger.info('--- Filling gaps finished at %s ' % (str(datetime.now())))
if debug:
print("Ending:", stream[0].time, stream[-1].time)
return stream.sorting()
def get_rotationangle(self, xcompensation=0,keys=['x','y','z'],**kwargs):
"""
DESCRIPTION:
"Estimating" the rotation angle towards a magnetic coordinate system
assuming z to be vertical down. Please note: You need to provide a
complete horizontal vector including either the x compensation field
or if not available an annual estimate of the vector. This method can be used
to determine reorientation characteristics in order to accurately apply
HDZ optimized basevalue calculations.
RETURNS:
rotangle (float) The estimated rotation angle in degree
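EXAMPLE:
A hedged call; the xcompensation value (in nT) is a placeholder:
>>> rotangle = variostream.get_rotationangle(xcompensation=21000)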
"""
annualmeans = kwargs.get('annualmeans')
#1. get vector from data
# x = y*tan(dec)
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
logger.error('get_rotation: provided keylist need to have three components.')
return self
logger.info('get_rotation: Determining rotation angle towards a magnetic coordinate system assuming z to be vertical down.')
ind1 = KEYLIST.index(keys[0])
ind2 = KEYLIST.index(keys[1])
ind3 = KEYLIST.index(keys[2])
if len(self.ndarray[0]) > 0:
if len(self.ndarray[ind1]) > 0 and len(self.ndarray[ind2]) > 0 and len(self.ndarray[ind3]) > 0:
# get mean disregarding nans
xl = [el for el in self.ndarray[ind1] if not np.isnan(el)]
yl = [el for el in self.ndarray[ind2] if not np.isnan(el)]
if annualmeans:
meanx = annualmeans[0]
else:
meanx = np.mean(xl)+xcompensation
meany = np.mean(yl)
# get rotation angle so that meany == 0
#print ("Rotation",meanx, meany)
#zeroy = meanx*np.sin(ra)+meany*np.cos(ra)
#-meany/meanx = np.tan(ra)
rotangle = np.arctan2(-meany,meanx) * (180.) / np.pi
logger.info('getrotation: Rotation angle determined: {} deg'.format(rotangle))
return rotangle
def get_sampling_period(self):
"""
returns the dominant sampling frequency in unit ! days !
for time savings, this function only tests the first 1000 elements
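Example (illustrative): one-minute data yields approximately 60./86400. = 0.000694 days.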
"""
# For proper application - duplicates are removed
self = self.removeduplicates()
if len(self.ndarray[0]) > 0:
timecol = self.ndarray[0].astype(float)
else:
timecol= self._get_column('time')
# New way:
if len(timecol) > 1:
diffs = np.asarray(timecol[1:]-timecol[:-1])
diffs = diffs[~np.isnan(diffs)]
me = np.median(diffs)
st = np.std(diffs)
diffs = [el for el in diffs if el <= me+2*st and el >= me-2*st]
return np.median(diffs)
else:
return 0.0
"""
timedifflist = [[0,0]]
timediff = 0
if len(timecol) <= 1000:
testrange = len(timecol)
else:
testrange = 1000
print "Get_sampling_rate", np.asarray(timecol[1:]-timecol[:-1])
print "Get_sampling_rate", np.median(np.asarray(timecol[1:]-timecol[:-1]))*3600.*24.
for idx, val in enumerate(timecol[:testrange]):
if idx > 1 and not isnan(val):
timediff = np.round((val-timeprev),7)
found = 0
for tel in timedifflist:
if tel[1] == timediff:
tel[0] = tel[0]+1
found = 1
if found == 0:
timedifflist.append([1,timediff])
timeprev = val
#print self
if not len(timedifflist) == 0:
timedifflist.sort(key=lambda x: int(x[0]))
# get the most often found timediff
domtd = timedifflist[-1][1]
else:
logger.error("get_sampling_period: unkown problem - returning 0")
domtd = 0
if not domtd == 0:
return domtd
else:
try:
return timedifflist[-2][1]
except:
logger.error("get_sampling_period: could not identify dominant sampling rate")
return 0
"""
def samplingrate(self, **kwargs):
"""
DEFINITION:
returns a rounded value of the sampling rate
in seconds
and updates the header information
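EXAMPLE:
A minimal sketch; the returned value also updates header['DataSamplingRate']:
>>> sr = stream.samplingrate()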
"""
# XXX include that in the stream reading process....
digits = kwargs.get('digits')
notrounded = kwargs.get('notrounded')
if not digits:
digits = 1
if not self.length()[0] > 1:
return 0.0
sr = self.get_sampling_period()*24*3600
unit = ' sec'
val = sr
# Create a suitable rounding function:
# Use simple rounds if sr > 60 secs
# Check accuracy for sr < 10 secs (three digits):
# e.g. abs(sr-round(sr,0)) * 1000 (1.002 -> 2, 0.998 -> 2)
if sr < 0.05:
for i in range(0,5):
multi = 10**i
srfloor = np.floor(sr*multi)
if srfloor >= 1:
# found multiplicator
# now determine significance taking into account three more digits
digs = np.floor(np.abs(sr*multi-srfloor)*1000)
if digs<5: # round to zero
val = np.round(srfloor/multi,1)
else:
val = np.round(sr,5)
break
elif sr < 59:
for i in range(0,3):
multi = 10**i
srfloor = np.floor(sr*multi)
if srfloor >= 1:
# found multiplicator
# now determine significance taking into account three more digits
digs = np.floor(np.abs(sr*multi-srfloor)*1000)
if digs<5: # round to zero
val = np.round(srfloor/multi,1)
else:
val = np.round(sr,3)
break
else:
val = np.round(sr,1)
"""
if np.round(sr*10.,0) == 0:
val = np.round(sr,2)
#unit = ' Hz'
elif np.round(sr,0) == 0:
if 0.09 < sr < 0.11:
val = np.round(sr,digits)
else:
val = np.round(sr,2)
#unit = ' Hz'
else:
val = np.round(sr,0)
"""
if notrounded:
val = sr
self.header['DataSamplingRate'] = str(val) + unit
return val
def integrate(self, **kwargs):
"""
DESCRIPTION:
Method to integrate selected columns respect to time.
-- Using scipy.integrate.cumtrapz
VARIABLES:
optional:
keys: (list) default ['x','y','z'] - provide a limited key list
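EXAMPLE:
A minimal sketch; the integral of column 'x' is written to column 'dx':
>>> integrated = stream.integrate(keys=['x'])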
"""
logger.info('--- Integrating started at %s ' % str(datetime.now()))
keys = kwargs.get('keys')
if not keys:
keys = ['x','y','z']
array = [[] for key in KEYLIST]
ndtype = False
if len(self.ndarray[0])>0:
ndtype = True
t = self.ndarray[0]
array[0] = t
else:
t = self._get_column('time')
for key in keys:
if ndtype:
ind = KEYLIST.index(key)
val = self.ndarray[ind]
array[ind] = np.asarray(val)
else:
val = self._get_column(key)
dval = sp.integrate.cumtrapz(np.asarray(val),t)
dval = np.insert(dval, 0, 0) # Prepend 0 to maintain original length
if ndtype:
ind = KEYLIST.index('d'+key)
array[ind] = np.asarray(dval)
else:
self._put_column(dval, 'd'+key)
self.ndarray = np.asarray(array)
logger.info('--- integration finished at %s ' % str(datetime.now()))
return self
def interpol(self, keys, **kwargs):
"""
DEFINITION:
Uses scipy.interpolate.interp1d to interpolate streams.
PARAMETERS:
Variables:
- keys: (list) List of keys to interpolate.
Kwargs:
- kind: (str) type of interpolation. Options:
linear = linear - Default
slinear = spline (first order)
quadratic = spline (second order)
cubic = spline (third order)
nearest = ?
zero = ?
(TODO: add these?)
- timerange: (timedelta object) default=timedelta(hours=1).
- fitdegree: (float) default=4.
- knotstep: (float < 0.5) determines the amount of knots:
amount = 1/knotstep ---> VERY smooth 0.1 | NOT VERY SMOOTH 0.001
RETURNS:
- func: (list) Contains the following:
list[0]: (dict) {'f+key': interpolate function}
list[1]: (float) date2num value of minimum timestamp
list[2]: (float) date2num value of maximum timestamp
EXAMPLE:
>>> int_data = pos_data.interpol(['f'])
APPLICATION:
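A hedged sketch; the interpolation functions can be re-applied to a stream with func2stream:
>>> func = stream.interpol(['f'], kind='linear')
>>> filled = stream.func2stream(func, keys=['f'], mode='values')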
"""
kind = kwargs.get('kind')
if not kind:
kind = 'linear'
if kind not in ['linear','slinear','quadratic','cubic','nearest','zero']:
logger.warning("interpol: Interpolation kind %s not valid. Using linear interpolation instead." % kind)
kind = 'linear'
ndtype = False
if len(self.ndarray[0]) > 0:
t = self.ndarray[0]
ndtype = True
else:
t = self._get_column('time')
nt,sv,ev = self._normalize(t)
sp = self.get_sampling_period()
functionkeylist = {}
logger.info("interpol: Interpolating stream with %s interpolation." % kind)
for key in keys:
if not key in NUMKEYLIST:
logger.error("interpol: Column key not valid!")
if ndtype:
ind = KEYLIST.index(key)
val = self.ndarray[ind].astype(float)
else:
val = self._get_column(key)
# interpolate NaN values
nans, xxx= nan_helper(val)
try: # Try to interpolate nan values
val[nans]= np.interp(xxx(nans), xxx(~nans), val[~nans])
except:
#val[nans]=int(nan)
pass
if len(val)>1:
functionkeylist['f'+key] = interpolate.interp1d(nt, val, kind)
else:
logger.warning("interpol: interpolation of a zero length data set - won't work.")
pass
logger.info("interpol: Interpolation complete.")
func = [functionkeylist, sv, ev]
return func
def interpolate_nans(self, keys):
""""
DEFINITION:
Provides a simple linear nan interpolator that returns the interpolated
data in the stream. Uses method that is already present elsewhere, e.g.
in filter, for easy and quick access.
PARAMETERS:
- keys: List of keys to interpolate.
RETURNS:
- stream: Original stream with nans replaced by linear interpolation.
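EXAMPLE:
A minimal sketch (the key list is illustrative):
>>> stream = stream.interpolate_nans(['x','y','z'])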
"""
for key in keys:
if key not in NUMKEYLIST:
logger.error("interpolate_nans: {} is an invalid key! Cannot interpolate.".format(key))
y = self._get_column(key)
nans, x = nan_helper(y)
y[nans] = np.interp(x(nans), x(~nans), y[~nans])
self._put_column(y, key)
logger.info("interpolate_nans: Replaced nans in {} with linearly interpolated values.".format(key))
return self
def k_extend(self, **kwargs):
"""
DESCRIPTION:
Extending the k_scale from 9 to 28 values as used for the GFZ kp value
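Example (illustrative): with k9_level = 500 nT the fortscale value 60 maps to
500*60/750 = 40 nT before the extended scale is subdivided into thirds.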
"""
k9_level = kwargs.get('k9_level')
if not k9_level:
if 'StationK9' in self.header:
# 1. Check header info
k9_level = self.header['StationK9']
else:
# 2. Set Potsdam default
k9_level = 500
fortscale = [0,7.5,15,30,60,105,180,300,495,750]
k_scale = [float(k9_level)*elem/750.0 for elem in fortscale]
newlst = []
klst = [0.,0.33,0.66,1.,1.33,1.66,2.,2.33,2.66,3.,3.33,3.66,4.,4.33,4.66,5.,5.33,5.66,6.,6.33,6.66,7.,7.33,7.66,8.,8.33,8.66,9.]
for idx,elem in enumerate(k_scale):
if idx > 0:
diff = elem - k_scale[idx-1]
newlst.append(elem-2*diff/3)
newlst.append(elem-diff/3)
newlst.append(elem)
indvar1 = KEYLIST.index('var1')
indvar2 = KEYLIST.index('var2')
ar = []
for elem in self.ndarray[indvar2]:
for count,val in enumerate(newlst):
if elem > val:
k = klst[count]
ar.append(k)
self.ndarray[indvar1] = np.asarray(ar)
return self
def k_fmi(self, **kwargs):
"""
DESCRIPTION:
Calculating k values following the fmi approach. The method uses three major steps:
Firstly, the record is filtered to one-minute data if necessary, outliers are removed
(using default options) and gaps are interpolated. Ideally, these steps have been
conducted before, which allows for complete control of them.
Secondly, the last 27 hours are investigated. Starting from the last record, the last
three hour segment is taken and the fmi approach is applied. Finally, the provided
stream is analyzed from the beginning. Definite values are thus produced for the
previous day after 3:00 am (depending on n - see below).
The FMI method:
The provided data stream is checked and converted to xyz data. Investigated are the
horizontal components. In a first run k values are calculated by simply determining
the max/min difference of the minute variation data within the three hour segments.
This is done for both horizontal components and the maximum difference is selected.
Using the transformation table related to the Niemegk scale the k values are calculated.
Based on these k values, a first estimate of the quiet daily variation (Sr) is obtained.
Hourly means with extended time ranges (30min + m + n) are obtained for each x.5 hour.
m refers to 120 minutes (0-3a.m., 21-24p.m.), 60 minutes (3-6, 18-21) or 0 minutes.
n is determined by k**3.3.
xyz within the code always refers to the coordinate system of the sensor and not to any geomagnetic reference.
By default it is assumed that the provided stream comes from a hdz oriented instrument.
For xyz (or any other) orientation use the option checky=True to investigate both horizontal components.
If the stream contains absolute data, the option hcomp = True transforms the stream to hdz.
The following steps are performed:
1. Asserts: Signal covers at least 24 hours, sampling rate minute or second
2. Produce a filtered minute signal, check for gaps, interpolate if necessary (done by the filter/sm algorithm) - needs some improvements
3. from the last value contained get 3 hour segments and calculate max, min and max-min
kwargs support the following keywords:
- k9_level (float) the value for which k9 is defined, all other values are linearly approximated
- magnetic latitude (float) another way to define the k scale
- timerange (timedelta object) default=timedelta(hours=1)
- fitdegree (float) default=5
- knotstep (float < 0.5) determines the amount of knots: amount = 1/knotstep ---> VERY smooth 0.1 | NOT VERY SMOOTH 0.001
- flag
PARAMETER:
k9_level (int) defines the observatory's K9 level. If not provided then firstly
the header information is scanned for a 'StationK9' input. If not
successful a K9 of 500 nT is assumed.
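A minimal, hedged call on a one-minute stream ('minutedata' is a placeholder name):
>>> kstream = minutedata.k_fmi(k9_level=500)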
"""
plot = kwargs.get('plot')
debug = kwargs.get('debug')
hcomp = kwargs.get('hcomp')
fitdegree = kwargs.get('fitdegree')
fitfunc=kwargs.get('fitfunc')
magnetic_latitude = kwargs.get('magnetic_latitude')
k9_level = kwargs.get('k9_level')
checky = kwargs.get('checky') # used for xyz data if True then the y component is checked as well
if not fitfunc:
fitfunc = 'harmonic'
if not fitdegree:
fitdegree = 5
if not k9_level:
if 'StationK9' in self.header:
# 1. Check header info
k9_level = self.header['StationK9']
else:
# 2. Set Potsdam default
k9_level = 500
# Some basics:
startinghours = [0,3,6,9,12,15,18,21]
mlist = [120,60,0,0,0,0,60,120]
#ngkscale = [0,5,10,20,40,70,120,200,330,500]
fortscale = [0,7.5,15,30,60,105,180,300,495,750]
k_scale = [float(k9_level)*elem/750.0 for elem in fortscale]
# calculate local scale from magnetic latitude (inclination):
# important: how to do that - what is the latitudinal relationship, how to transfer the scale,
# it is frequently mentioned to be quasi-log but it is not a simple Log scale
# func can be fitted reasonably well by
# func[a_] := Exp[0.8308663199145958 + 0.7894060396483681 k - 0.021250627459823503 k^2]
kstream = DataStream()
logger.info('--- Starting k value calculation: %s ' % (str(datetime.now())))
# Non-destructive - using a copy of the supplied stream
stream = self.copy()
# ############################################
# ## Step 1 ##############
# ## ------------------------ ##############
# ## preparing data: ##############
# ## - check sampling/length ##############
# ## - check type (xyz etc) ##############
# ## - check removing outliers ##############
# ## - eventually filter ##############
# ## - interpolate/fill gaps ##############
# ############################################
# removing outliers
if debug:
print("Removing outliers")
stream = stream.flag_outlier(keys=['x','y','z'],threshold=6.) # Weak conditions
stream = stream.remove_flagged()
sr = stream.samplingrate()
if debug:
print("Sampling rate", sr)
if sr > 65:
print("Algorythm requires minute or higher resolution - aborting")
return DataStream()
if sr <= 0.9:
print("Data appears to be below 1 second resolution - filtering to seconds first")
stream = stream.nfilter(filter_width=timedelta(seconds=1))
sr = stream.samplingrate()
if 0.9 < sr < 55:
print("Data appears to be below 1 minute resolution - filtering to minutes")
stream = stream.nfilter(filter_width=timedelta(minutes=1))
else:
pass
# get_gaps - put nans to missing data
# then replace nans with interpolated values
#nans, x= nan_helper(v)
# v[nans]= interp(x(nans), x(~nans), v[~nans])
ndtype = True
if len(stream.ndarray[0]) > 0:
ndtype = True
timediff = np.max(stream.ndarray[0]) - np.min(stream.ndarray[0])
indtyp = KEYLIST.index('typ')
try:
gettyp = stream.ndarray[indtyp][0]
except:
gettyp = 'xyzf'
print("ndtype - Timeseries ending at:", num2date(np.max(stream.ndarray[0])))
else:
timediff = stream[-1].time - stream[0].time
gettyp = stream[0].typ
print("LineStruct - Timeseries ending at:", num2date(stream[-1].time))
print("Coverage in days:", timediff)
if timediff < 1.1: # 1 corresponds to 24 hours
print("not enough time covered - aborting")
return
if debug:
print("Typ:", gettyp)
# Transform the coordinate system to XYZ, assuming an hdz orientation.
fmistream = stream
if gettyp == 'idff':
fmistream = stream._convertstream('idf2xyz',keep_header=True)
elif gettyp == 'hdzf':
fmistream = stream._convertstream('hdz2xyz',keep_header=True)
elif not gettyp == 'xyzf':
print("Unkown type of data - please provide xyzf, idff, hdzf -aborting")
return
# By default use H for determination
if debug:
print("converting data to hdz - only analyze h")
print("This is applicable in case of baselinecorrected data")
# TODO Important currently we are only using x (or x and y)
if hcomp:
print("Please note: H comp requires that columns xyz contain baseline corrected values")
fmistream = fmistream._convertstream('xyz2hdz',keep_header=True)
elif 'DataAbsFunctionObject' in fmistream.header:
print("Found Baseline function")
pass # to a bc correction and
checky = True
else:
# If variation data use maximum from x and y
checky = True
# ############################################
# ## Step 2 ##############
# ## ------------------------ ##############
# ## some functions ##############
# ############################################
def klist2stream(klist, kvalstream=DataStream() ,ndtype=True):
"""
Internal method to convert a k value list to a stream
"""
#emptystream = DataStream()
if len(kvalstream.ndarray[0]) > 0:
kexists = True
#ti = list(li.ndarray[0])
#print "Previous k", li.ndarray
elif len(kvalstream) > 0:
kexists = True
#li = [elem for elem in kvalstream]
#ti = [elem.time for elem in kvalstream]
else:
kexists = False
array = [[] for key in KEYLIST]
#li = DataStream()
indvar1 = KEYLIST.index('var1')
indvar2 = KEYLIST.index('var2')
indvar3 = KEYLIST.index('var3')
if ndtype:
#array = [[] for key in KEYLIST]
for kline in klist:
time = kline[0]
if kexists:
try:
ind = list(kvalstream.ndarray[0]).index(time)
#print "Found time at index", ind
#if kvalstream.ndarray[indvar3][ind] < quality lower
kvalstream = kvalstream._delete(ind)
except:
pass
kvalstream.ndarray[0] = np.append(kvalstream.ndarray[0],kline[0])
kvalstream.ndarray[indvar1] = np.append(kvalstream.ndarray[indvar1],kline[1])
kvalstream.ndarray[indvar2] = np.append(kvalstream.ndarray[indvar2],kline[2])
kvalstream.ndarray[indvar3] = np.append(kvalstream.ndarray[indvar3],kline[3])
else:
# put data to kvalstream
array[0].append(kline[0])
array[indvar1].append(kline[1])
array[indvar2].append(kline[2])
array[indvar3].append(kline[3]) # Quality parameter - containing time coverage
# High quality replaces low quality
if not kexists:
array[0] = np.asarray(array[0])
array[indvar1] = np.asarray(array[indvar1])
array[indvar2] = np.asarray(array[indvar2])
kvalstream.ndarray = np.asarray(array)
return kvalstream
def maxmink(datastream, cdlist, index, k_scale, ndtype=True, **kwargs):
# function returns 3 hour k values for a 24 hour minute time series
# The following function is used several times on different !!!!! 24h !!!!!!! timeseries
# (with and without removal of daily-quiet signals)
checky = kwargs.get('checky')
xmaxval = 0
xminval = 0
ymaxval = 0
yminval = 0
deltaday = 0
klist = []
for j in range(0,8):
if debug:
print("Loop Test", j, index, num2date(cdlist[index])-timedelta(days=deltaday))
#t7 = datetime.utcnow()
#threehours = datastream.extract("time", date2num(num2date(cdlist[index])-timedelta(days=deltaday)), "<")
et = date2num(num2date(cdlist[index])-timedelta(days=deltaday))
index = index - 1
if index < 0:
index = 7
deltaday += 1
if debug:
print("Start", num2date(cdlist[index])-timedelta(days=deltaday))
#threehours = threehours.extract("time", date2num(num2date(cdlist[index])-timedelta(days=deltaday)), ">=")
st = date2num(num2date(cdlist[index])-timedelta(days=deltaday))
ar = datastream._select_timerange(starttime=st, endtime=et)
threehours = DataStream([LineStruct()],{},ar)
#print("ET",st,et)
#t8 = datetime.utcnow()
#print("Extracting time needed:", t8-t7)
if ndtype:
len3hours = len(threehours.ndarray[0])
else:
len3hours = len(threehours)
if debug:
print("Length of three hour segment", len3hours)
if len3hours > 0:
if ndtype:
indx = KEYLIST.index('x')
indy = KEYLIST.index('y')
colx = threehours.ndarray[indx]
else:
colx = threehours._get_column('x')
colx = [elem for elem in colx if not isnan(elem)]
if len(colx) > 0:
xmaxval = max(colx)
xminval = min(colx)
else:
xmaxval = 0.0
xminval = 0.0
if checky:
if ndtype:
coly = threehours.ndarray[indy]
else:
coly = threehours._get_column('y')
coly = [elem for elem in coly if not isnan(elem)]
ymaxval = max(coly)
yminval = min(coly)
else:
ymaxval = 0.0
yminval = 0.0
maxmindiff = max([xmaxval-xminval, ymaxval-yminval])
k = np.nan
for count,val in enumerate(k_scale):
if maxmindiff > val:
k = count
if np.isnan(k):
maxmindiff = np.nan
if debug:
print("Extrema", k, maxmindiff, xmaxval, xminval, ymaxval, yminval)
# create a k-value list
else:
k = np.nan
maxmindiff = np.nan
ti = date2num(num2date(cdlist[index])-timedelta(days=deltaday)+timedelta(minutes=90))
klist.append([ti,k,maxmindiff,1])
return klist
def fmimeans(datastream, laststep, kvalstream, ndtype=True):
# function returns 3 hour k values for a 24 hour minute time series
deltaday = 0
hmlist = []
meanstream = DataStream()
lasthour = num2date(laststep).replace(minute=0, second=0, microsecond=0)
for j in range(0,24):
#if debug:
# print "Loop Test", j
# last hour
index = lasthour.hour
index = index - 1
if index < 0:
index = 23
#if debug:
#print index
meanat = lasthour - timedelta(minutes=30)
#get m (using index)
#if debug:
#print int(np.floor(index/3.))
m = mlist[int(np.floor(index/3.))]
#if debug:
#print "m:", m
#get n
# test: find nearest kval from kvalstream
idx = (np.abs(kvalstream.ndarray[0].astype(float)-date2num(meanat))).argmin()
kval = kvalstream.ndarray[KEYLIST.index('var1')][idx]
if not np.isnan(kval):
n = kval**3.3
else:
n = 0
# extract meanat +/- (30+m+n)
valrange = datastream.extract("time", date2num(meanat+timedelta(minutes=30)+timedelta(minutes=m)+timedelta(minutes=n)), "<")
valrange = valrange.extract("time", date2num(meanat-timedelta(minutes=30)-timedelta(minutes=m)-timedelta(minutes=n)), ">=")
#if debug:
#print "Length of Sequence", len(valrange), num2date(valrange[0].time), num2date(valrange[-1].time)
if ndtype:
firsttime = np.min(datastream.ndarray[0])
else:
firsttime = datastream[0].time
if not firsttime < date2num(meanat-timedelta(minutes=30)-timedelta(minutes=m)-timedelta(minutes=n)):
print("##############################################")
print(" careful - datastream not long enough for correct k determination")
print("##############################################")
print("Hourly means not correctly determinable for day", meanat)
print("as the extended time range is not reached")
print("----------------------------------------------")
kvalstream.ndarray[KEYLIST.index('var3')][idx] = 0.5
#return meanstream
# Now get the means
meanx = valrange.mean('x')
meany = valrange.mean('y')
meanz = valrange.mean('z')
hmlist.append([date2num(meanat),meanx,meany,meanz])
# Describe why we are duplicating values at the end and the beginning!!
# Was that necessary for the polyfit??
if j == 0:
hmlist.append([date2num(meanat+timedelta(minutes=30)+timedelta(minutes=m)+timedelta(minutes=n)),meanx,meany,meanz])
if j == 23:
hmlist.append([date2num(meanat-timedelta(minutes=30)-timedelta(minutes=m)-timedelta(minutes=n)),meanx,meany,meanz])
lasthour = lasthour - timedelta(hours=1)
if ndtype:
array = [[] for key in KEYLIST]
indx = KEYLIST.index('x')
indy = KEYLIST.index('y')
indz = KEYLIST.index('z')
array[0] = np.asarray([elem[0] for elem in hmlist])
array[indx] = np.asarray([elem[1] for elem in hmlist])
array[indy] = np.asarray([elem[2] for elem in hmlist])
array[indz] = np.asarray([elem[3] for elem in hmlist])
meanstream.ndarray = np.asarray(array)
else:
for elem in sorted(hmlist):
line = LineStruct()
line.time = elem[0]
line.x = elem[1]
line.y = elem[2]
line.z = elem[3]
meanstream.add(line)
#print klist
return meanstream.sorting()
# ############################################
# ## Step 2 ##############
# ## ------------------------ ##############
# ## analyze last 24 h: ##############
# ## - get last day ##############
# ## - get last 3hour segment ##############
# ## - run backwards ##############
# ## - calc fmi: ##############
# ## - 1. get max/min deviation ###########
# ## - 2. use this k to get sr ###########
# ## - 3. calc k with sr reduced ##########
# ## - 4. recalc sr ##########
# ## - 5. final k ##########
# ############################################
if ndtype:
currentdate = num2date(np.max(fmistream.ndarray[0])).replace(tzinfo=None)
lastdate = currentdate
d = currentdate.date()
currentdate = datetime.combine(d, datetime.min.time())
else:
currentdate = num2date(fmistream[-1].time).replace(tzinfo=None)
lastdate = currentdate
d = currentdate.date()
currentdate = datetime.combine(d, datetime.min.time())
print("Last effective time series ending at day", currentdate)
print(" -----------------------------------------------------")
print(" ------------- Starting backward analysis ------------")
print(" --------------- beginning at last time --------------")
# selecting reduced time range!!!
t1 = datetime.utcnow()
array = fmistream._select_timerange(starttime=currentdate-timedelta(days=2))
fmitstream = DataStream([LineStruct()],fmistream.header,array)
cdlist = [date2num(currentdate.replace(hour=elem)) for elem in startinghours]
#print("Daily list", cdlist, currentdate)
t2 = datetime.utcnow()
print("Step0 needed:", t2-t1)
#ta, i = find_nearest(np.asarray(cdlist), date2num(lastdate-timedelta(minutes=90)))
ta, i = find_nearest(np.asarray(cdlist), date2num(lastdate))
if i < 7:
i=i+1
else:
i=0
cdlist = [el+1 for el in cdlist]
#print("Nearest three hour mark", num2date(ta), i, np.asarray(cdlist))
if plot:
import magpy.mpplot as mp
fmistream.plot(noshow=True, plottitle="0")
# 1. get a backward 24 hour calculation from the last record
klist = maxmink(fmitstream,cdlist,i,k_scale)
#print(klist, i)
kstream = klist2stream(klist, kstream)
t3 = datetime.utcnow()
print("Step1 needed:", t3-t2)
# 2. a) now get the hourly means with extended time ranges (sr function)
hmean = fmimeans(fmitstream,date2num(lastdate),kstream)
func = hmean.fit(['x','y','z'],fitfunc='harmonic',fitdegree=5)
if plot:
hmean.plot(function=func,noshow=True, plottitle="1: SR function")
# 2. b) subtract sr from original record
#redfmi = fmistream.func_subtract(func)
redfmi = fmistream.func2stream(func,mode='sub')
if plot:
redfmi.plot(noshow=True, plottitle="1: reduced")
fmistream.plot(noshow=True, plottitle="1")
t4 = datetime.utcnow()
print("Step2 needed:", t4-t3)
# 3. recalc k
klist = maxmink(redfmi,cdlist,i,k_scale)
kstream = klist2stream(klist, kstream)
#print ("3.", num2date(kstream.ndarray[0]))
t5 = datetime.utcnow()
print("Step3 needed:", t5-t4)
# 4. recalc sr and subtract
finalhmean = fmimeans(fmitstream,date2num(lastdate),kstream)
finalfunc = finalhmean.fit(['x','y','z'],fitfunc='harmonic',fitdegree=5)
firedfmi = fmistream.func2stream(finalfunc,mode='sub')
if plot:
mp.plot(finalhmean,['x','y','z'],function=finalfunc,noshow=True, plottitle="2: SR function")
#finalhmean.plot(['x','y','z'],function=finalfunc,noshow=True, plottitle="2: SR function")
firedfmi.plot(['x','y','z'],noshow=True, plottitle="2: reduced")
fmitstream.plot(['x','y','z'],plottitle="2")
t6 = datetime.utcnow()
print("Step4 needed:", t6-t5)
# 5. final k
klist = maxmink(firedfmi,cdlist,i,k_scale)
kstream = klist2stream(klist, kstream)
#print ("Last", num2date(kstream.ndarray[0]))
t7 = datetime.utcnow()
print("Step5 needed:", t7-t6)
# ############################################
# ## Step 3 ##############
# ## ------------------------ ##############
# ## analyze from beginning: ##############
# ## - get first record ##############
# ## - from day to day ##############
# ## - run backwards ##############
# ## - calc fmi: ##############
# ## - 1. get max/min deviation ###########
# ## - 2. use this k to get sr ###########
# ## - 3. calc k with sr reduced ##########
# ## - 4. recalc sr ##########
# ## - 5. final k ##########
# ############################################
print(" -----------------------------------------------------")
print(" ------------- Starting forward analysis -------------")
print(" ----------------- from first date ------------------")
if ndtype:
st = np.min(fmistream.ndarray[0])
else:
st = fmistream[0].time
startday = int(np.floor(st))
for daynum in range(1,int(timediff)+1):
currentdate = num2date(startday+daynum)
print("Running daily chunks forward until ", currentdate)
# selecting reduced time range!!!
array = fmistream._select_timerange(starttime=currentdate-timedelta(days=3),endtime=currentdate+timedelta(days=1))
fmitstream = DataStream([LineStruct()],fmistream.header,array)
cdlist = [date2num(currentdate.replace(hour=elem)) for elem in startinghours]
#print "Daily list", cdlist
# 1. get a backward 24 hour calculation from the last record
klist = maxmink(fmitstream,cdlist,0,k_scale)
#print("forward", klist)
kstream = klist2stream(klist, kstream)
# 2. a) now get the hourly means with extended time ranges (sr function)
hmean = fmimeans(fmitstream,startday+daynum,kstream)
if ndtype:
lenhmean = len(hmean.ndarray[0])
else:
lenhmean = len(hmean)
if not lenhmean == 0: # Length 0 if not enough data for full extended mean value calc
func = hmean.fit(['x','y','z'],fitfunc='harmonic',fitdegree=5)
#hmean.plot(function=func,noshow=True)
if not func[0] == {}:
if plot:
fmistream.plot(noshow=True)
# 2. b) subtract sr from original record
redfmi = fmitstream.func2stream(func,mode='sub')
# 3. recalc k
klist = maxmink(redfmi,cdlist,0,k_scale)
kstream = klist2stream(klist, kstream)
#print klist
# 4. recalc sr and subtract
finalhmean = fmimeans(fmitstream,startday+daynum,kstream)
finalfunc = finalhmean.fit(['x','y','z'],fitfunc='harmonic',fitdegree=5)
firedfmi = fmistream.func2stream(finalfunc,mode='sub')
if plot:
finalhmean.plot(['x','y','z'],noshow=True, function=finalfunc, plottitle="2")
firedfmi.plot(['x','y','z'],noshow=True, plottitle="2: reduced")
fmitstream.plot(['x','y','z'], plottitle="2: fmistream")
# 5. final k
klist = maxmink(firedfmi,cdlist,0,k_scale)
kstream = klist2stream(klist, kstream)
#print "Final", klist
#print kstream.ndarray, klist
kstream = kstream.sorting()
kstream.header['col-var1'] = 'K'
kstream.header['col-var2'] = 'C'
kstream.header['col-var3'] = 'Quality'
#print ("Test",kstream.ndarray)
return DataStream([LineStruct()],kstream.header,kstream.ndarray)
"""
outstream = DataStream()
lst = [[elem.time,elem.var1,elem.var2] for elem in kstream]
for el in sorted(lst):
line = LineStruct()
line.time = el[0]
line.var1 = el[1]
line.var2 = el[2]
outstream.add(line)
return outstream
"""
def linestruct2ndarray(self):
"""
DEFINITION:
Converts linestruct data to ndarray.
RETURNS:
- self with ndarray filled
EXAMPLE:
>>> data = data.linestruct2ndarray()
APPLICATION:
"""
def checkEqual3(lst):
return lst[1:] == lst[:-1]
array = [np.asarray([]) for elem in KEYLIST]
keys = self._get_key_headers()
t = np.asarray(self._get_column('time'))
array[0] = t
for key in keys:
ind = KEYLIST.index(key)
col = self._get_column(key)
if len(col) > 0:
if not False in checkEqual3(col) and str(col[0]) == str('-'):
col = np.asarray([])
array[ind] = col
else:
array[ind] = []
array = np.asarray(array,dtype=object)
stream = [LineStruct()]
return DataStream(stream,self.header,array)
def mean(self, key, **kwargs):
"""
DEFINITION:
Calculates mean values for the specified key; NaNs are taken into account.
Means are only calculated if more than "percentage" percent of the values are non-NaN.
Returns a float if successful or NaN.
PARAMETERS:
Variables:
- key: (KEYLIST) element of Keylist like 'x' .
Kwargs:
- percentage: (int) Define required percentage of non-nan values, if not
met that nan will be returned. Default is 95 (%)
- meanfunction: (string) accepts 'mean' and 'median'. Default is 'mean'
- std: (bool) if true, the standard deviation is returned as well
RETURNS:
- mean/median(, std) (float)
EXAMPLE:
>>> meanx = datastream.mean('x',meanfunction='median',percentage=90)
APPLICATION:
stream = read(datapath)
mean = stream.mean('f')
median = stream.mean('f',meanfunction='median')
stddev = stream.mean('f',std=True)
"""
percentage = kwargs.get('percentage')
meanfunction = kwargs.get('meanfunction')
std = kwargs.get('std')
if not meanfunction:
meanfunction = 'mean'
if not percentage:
percentage = 95
if not std:
std = False
ndtype = False
if len(self.ndarray[0])>0:
ndtype = True
elif len(self) > 0:
pass
else:
logger.error('mean: empty stream - aborting')
if std:
return float("NaN"), float("NaN")
else:
return float("NaN")
try: #python2
if not isinstance( percentage, (int,long)):
logger.error("mean: Percentage needs to be an integer!")
except:
if not isinstance( percentage, (int)):
logger.error("mean: Percentage needs to be an integer!")
if not key in KEYLIST[:16]:
logger.error("mean: Column key not valid!")
if ndtype:
ind = KEYLIST.index(key)
length = len(self.ndarray[0])
self.ndarray[ind] = np.asarray(self.ndarray[ind])
ar = self.ndarray[ind].astype(float)
ar = ar[~np.isnan(ar)]
else:
ar = [getattr(elem,key) for elem in self if not isnan(getattr(elem,key))]
length = float(len(self))
div = float(len(ar))/length*100.0
if div >= percentage:
if std:
return eval('np.'+meanfunction+'(ar)'), np.std(ar)
else:
return eval('np.'+meanfunction+'(ar)')
else:
logger.info('mean: Too many nans in column {}, exceeding {} percent'.format(key,percentage))
if std:
return float("NaN"), float("NaN")
else:
return float("NaN")
def missingvalue(self,v,window_len,threshold=0.9,fill='mean'):
"""
DESCRIPTION
fills missing values either with means or interpolated values
PARAMETER:
v: (np.array) single column of ndarray
window_len: (int) length of window to check threshold
threshold: (float) minimum percentage of available data e.g. 0.9 - 90 percent
fill: (string) 'mean' or 'interpolation'
RETURNS:
ndarray - single column
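A hedged sketch on a single column (window length and threshold are illustrative):
>>> ind = KEYLIST.index('x')
>>> filledcol = stream.missingvalue(stream.ndarray[ind], 600, threshold=0.9, fill='interpolation')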
"""
try:
v_rest = np.array([])
v = v.astype(float)
n_split = len(v)/float(window_len)
if not n_split == int(n_split):
el = int(int(n_split)*window_len)
v_rest = v[el:]
v = v[:el]
spli = np.split(v,int(len(v)/window_len))
if len(v_rest) > 0:
spli.append(v_rest)
newar = np.array([])
for idx,ar in enumerate(spli):
nans, x = nan_helper(ar)
if len(ar[~nans]) >= threshold*len(ar):
if fill == 'mean':
ar[nans]= np.nanmean(ar)
else:
ar[nans] = np.interp(x(nans), x(~nans), ar[~nans])
newar = np.concatenate((newar,ar))
v = newar
except:
print ("Filter: could not split stream in equal parts for interpolation - switching to conservative mode")
return v
def MODWT_calc(self,key='x',wavelet='haar',level=1,plot=False,outfile=None,
window=5):
"""
DEFINITION:
Multiple Overlap Discrete wavelet transform (MODWT) method of analysing a magnetic signal
to pick out SSCs. This method was taken from Hafez (2013b): "Geomagnetic Sudden
Commencement Automatic Detection via MODWT"
(NOTE: PyWavelets package must be installed for this method. It should be applied
to 1s data - otherwise the sample window and detection levels should be changed.)
METHOD:
1. Use the Haar wavelet filter to calculate the 1st and 2nd details
of the geomagnetic signal.
2. The 1st detail (D1) samples are squared to evaluate the magnitude.
3. The sample window (5) is averaged to avoid ripple effects. (This means the
returned stream will have ~1/5 the size of the original.)
PARAMETERS:
Variables:
- key: (str) Apply MODWT to this key. Default 'x' due to SSCs dominating
the horizontal component.
- wavelet: (str) Type of filter to use. Default 'haar'; Hafez (2013) uses 'db4'
(4th-order Daubechies wavelet filter).
- level: (int) Decomposition level. Will calculate details down to this level.
Default 1; Hafez (2013) uses 3.
- plot: (bool) If True, will display a plot of A3, D1, D2 and D3.
- outfile: (str) If given, will plot will be saved to 'outfile' path.
- window: (int) Length of sample window. Default 5, i.e. 5s with second data.
RETURNS:
- MODWT_stream: (DataStream object) A stream containing the following:
'x': A_n (approximation function)
'var1': D1 (first detail)
'var2': D2 (second detail)
'var3': D3 (third detail)
... (further details, if calculated, are stored in 'var4', 'var5' and 'dy')
EXAMPLE:
>>> MODWT_stream = stream.MODWT_calc(plot=True)
APPLICATION:
# Storm detection using detail 3 (D3 = var3):
from magpy.stream import *
stream = read('LEMI_1s_Data_2014-02-15.cdf') # 2014-02-15 is a good storm example
MODWT_stream = stream.MODWT_calc(plot=True)
Da_min = 0.0005 # nT^2 (minimum amplitude of D3 for storm detection)
Dp_min = 40 # seconds (minimum period of Da > Da_min for storm detection)
detection = False
for row in MODWT_stream:
if row.var3 >= Da_min and detection == False:
timepin = row.time
detection = True
elif row.var3 < Da_min and detection == True:
duration = (num2date(row.time) - num2date(timepin)).seconds
if duration >= Dp_min:
print "Storm detected!"
print duration, num2date(timepin)
detection = False
"""
# Import required package PyWavelets:
# http://www.pybytes.com/pywavelets/index.html
import pywt
# 1a. Grab array from stream
data = self._get_column(key)
t_ind = KEYLIST.index('time')
#MODWT_stream = DataStream([],{})
MODWT_stream = DataStream()
headers = MODWT_stream.header
array = [[] for key in KEYLIST]
x_ind = KEYLIST.index('x')
dx_ind = KEYLIST.index('dx')
var1_ind = KEYLIST.index('var1')
var2_ind = KEYLIST.index('var2')
var3_ind = KEYLIST.index('var3')
var4_ind = KEYLIST.index('var4')
var5_ind = KEYLIST.index('var5')
dy_ind = KEYLIST.index('dy')
i = 0
logger.info("MODWT_calc: Starting Discrete Wavelet Transform of key %s." % key)
if len(data) % 2 == 1:
data = data[0:-1]
# Results have format:
# (cAn, cDn), ..., (cA2, cD2), (cA1, cD1)
coeffs = pywt.swt(data, wavelet, level)
acoeffs, dcoeffs = [], []
# use separate loop variables here so that the sliding-window index i (set above) is not clobbered
for lv in range(level):
(a, d) = coeffs[lv]
acoeffs.append(a)
dcoeffs.append(d)
for idx, item in enumerate(dcoeffs):
dcoeffs[idx] = [j**2 for j in item]
# 1b. Loop for sliding window
while True:
if i >= (len(data)-window):
break
# Take the values in the middle of the window (not exact but changes are
# not extreme over standard 5s window)
array[t_ind].append(self.ndarray[t_ind][i + window//2])  # integer index required
data_cut = data[i:i+window]
array[x_ind].append(sum(data_cut)/float(window))
a_cut = acoeffs[0][i:i+window]
array[dx_ind].append(sum(a_cut)/float(window))
for j in range(level):
d_cut = dcoeffs[-(j+1)][i:i+window]
if j <= 5:
key = 'var'+str(j+1)
array[KEYLIST.index(key)].append(sum(d_cut)/float(window))
elif 5 < j <= 7:
if j == 6:
key = 'dy'
elif j == 7:
key = 'dz'
array[KEYLIST.index(key)].append(sum(d_cut)/float(window))
i += window
logger.info("MODWT_calc: Finished MODWT.")
MODWT_stream.header['col-x'] = 'A3'
MODWT_stream.header['unit-col-x'] = 'nT^2'
MODWT_stream.header['col-var1'] = 'D1'
MODWT_stream.header['unit-col-var1'] = 'nT^2'
MODWT_stream.header['col-var2'] = 'D2'
MODWT_stream.header['unit-col-var2'] = 'nT^2'
MODWT_stream.header['col-var3'] = 'D3'
MODWT_stream.header['unit-col-var3'] = 'nT^2'
MODWT_stream.header['col-var4'] = 'D4'
MODWT_stream.header['unit-col-var4'] = 'nT^2'
MODWT_stream.header['col-var5'] = 'D5'
MODWT_stream.header['unit-col-var5'] = 'nT^2'
MODWT_stream.header['col-dy'] = 'D6'
MODWT_stream.header['unit-col-dy'] = 'nT^2'
# Plot stream:
if plot == True:
date = datetime.strftime(num2date(self.ndarray[0][0]),'%Y-%m-%d')
logger.info('MODWT_calc: Plotting data...')
if outfile:
MODWT_stream.plot(['x','var1','var2','var3'],
plottitle="MODWT Decomposition of %s (%s)" % (key,date),
outfile=outfile)
else:
MODWT_stream.plot(['x','var1','var2','var3'],
plottitle="MODWT Decomposition of %s (%s)" % (key,date))
for key in KEYLIST:
array[KEYLIST.index(key)] = np.asarray(array[KEYLIST.index(key)])
return DataStream([LineStruct()], headers, np.asarray(array,dtype=object))
def multiply(self, factors, square=False):
"""
DEFINITION:
A function to multiply the datastream, should one ever have the need to.
Scale value correction for example.
PARAMETERS:
Variables:
- factors: (dict) Dictionary of multiplication factors with keys to apply to
e.g. {'x': -1, 'f': 2}
Kwargs:
- square: (bool) If True, the column is raised to the power of the given factor
instead of being multiplied by it.
RETURNS:
- self: (DataStream) Multiplied datastream.
EXAMPLE:
>>> data.multiply({'x':-1})
APPLICATION:
"""
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype = True
sel = self.copy()
for key in factors:
if key in KEYLIST:
if ndtype:
ind = KEYLIST.index(key)
val = sel.ndarray[ind]
else:
val = sel._get_column(key)
if key == 'time':
logger.error("factor: Multiplying time? That's just plain silly.")
else:
if square == False:
newval = [elem * factors[key] for elem in val]
logger.info('factor: Multiplied column %s by %s.' % (key, factors[key]))
else:
newval = [elem ** factors[key] for elem in val]
logger.info('factor: Raised column %s to the power of %s.' % (key, factors[key]))
if ndtype:
sel.ndarray[ind] = np.asarray(newval)
else:
sel = sel._put_column(newval, key)
else:
logger.warning("factor: Key '%s' not in keylist." % key)
return sel
def obspyspectrogram(self, data, samp_rate, per_lap=0.9, wlen=None, log=False,
outfile=None, fmt=None, axes=None, dbscale=False,
mult=8.0, cmap=None, zorder=None, title=None, show=True,
sphinx=False, clip=[0.0, 1.0]):
#TODO: Discuss with Ramon which kind of window should be used (cos^2(2*pi (t/T)))
"""
Function taken from ObsPy
Computes and plots spectrogram of the input data.
:param data: Input data
:type samp_rate: float
:param samp_rate: Samplerate in Hz
:type per_lap: float
:param per_lap: Percentage of overlap of sliding window, ranging from 0
to 1. High overlaps take a long time to compute.
:type wlen: int or float
:param wlen: Window length for fft in seconds. If this parameter is too
small, the calculation will take forever.
:type log: bool
:param log: Logarithmic frequency axis if True, linear frequency axis
otherwise.
:type outfile: String
:param outfile: String for the filename of output file, if None
interactive plotting is activated.
:type fmt: String
:param fmt: Format of image to save
:type axes: :class:`matplotlib.axes.Axes`
:param axes: Plot into given axes, this deactivates the fmt and
outfile option.
:type dbscale: bool
:param dbscale: If True 10 * log10 of color values is taken, if False the
sqrt is taken.
:type mult: float
:param mult: Pad zeros to length mult * wlen. This will make the spectrogram
smoother. Available for matplotlib > 0.99.0.
:type cmap: :class:`matplotlib.colors.Colormap`
:param cmap: Specify a custom colormap instance
:type zorder: float
:param zorder: Specify the zorder of the plot. Only of importance if other
plots in the same axes are executed.
:type title: String
:param title: Set the plot title
:type show: bool
:param show: If True (default), call `plt.show()` at the end of the routine.
Set to False so that further modifications can be done to the figure before showing it.
:type sphinx: bool
:param sphinx: Internal flag used for API doc generation, default False
:type clip: [float, float]
:param clip: adjust colormap to clip at lower and/or upper end. The given
percentages of the amplitude range (linear or logarithmic depending
on option `dbscale`) are clipped.
"""
# enforce float for samp_rate
samp_rate = float(samp_rate)
# set wlen from samp_rate if not specified otherwise
if not wlen:
wlen = samp_rate / 100.
npts = len(data)
# nfft needs to be an integer, otherwise a deprecation will be raised
#XXX add condition for too many windows => calculation takes for ever
nfft = int(nearestPow2(wlen * samp_rate))
if nfft > npts:
nfft = int(nearestPow2(npts / 8.0))
if mult is not None:
mult = int(nearestPow2(mult))
mult = mult * nfft
nlap = int(nfft * float(per_lap))
data = data - data.mean()
end = npts / samp_rate
# Here we call not plt.specgram as this already produces a plot
# matplotlib.mlab.specgram should be faster as it computes only the
# arrays
# XXX mlab.specgram uses fft, would be better and faster use rfft
if MATPLOTLIB_VERSION >= [0, 99, 0]:
specgram, freq, time = mlab.specgram(data, Fs=samp_rate, NFFT=nfft,
pad_to=mult, noverlap=nlap)
else:
specgram, freq, time = mlab.specgram(data, Fs=samp_rate,
NFFT=nfft, noverlap=nlap)
# db scale and remove zero/offset for amplitude
if dbscale:
specgram = 10 * np.log10(specgram[1:, :])
else:
specgram = np.sqrt(specgram[1:, :])
freq = freq[1:]
vmin, vmax = clip
if vmin < 0 or vmax > 1 or vmin >= vmax:
msg = "Invalid parameters for clip option."
raise ValueError(msg)
_range = float(specgram.max() - specgram.min())
vmin = specgram.min() + vmin * _range
vmax = specgram.min() + vmax * _range
norm = Normalize(vmin, vmax, clip=True)
if not axes:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes
# calculate half bin width
halfbin_time = (time[1] - time[0]) / 2.0
halfbin_freq = (freq[1] - freq[0]) / 2.0
if log:
# pcolor expects one bin more at the right end
freq = np.concatenate((freq, [freq[-1] + 2 * halfbin_freq]))
time = np.concatenate((time, [time[-1] + 2 * halfbin_time]))
# center bin
time -= halfbin_time
freq -= halfbin_freq
# pcolormesh issue was fixed in matplotlib r5716 (2008-07-07)
# inbetween tags 0.98.2 and 0.98.3
# see:
# - http://matplotlib.svn.sourceforge.net/viewvc/...
# matplotlib?revision=5716&view=revision
# - http://matplotlib.sourceforge.net/_static/CHANGELOG
if MATPLOTLIB_VERSION >= [0, 98, 3]:
# Log scaling for frequency values (y-axis)
ax.set_yscale('log')
# Plot times
ax.pcolormesh(time, freq, specgram, cmap=cmap, zorder=zorder,
norm=norm)
else:
X, Y = np.meshgrid(time, freq)
ax.pcolor(X, Y, specgram, cmap=cmap, zorder=zorder, norm=norm)
ax.semilogy()
else:
# this method is much much faster!
specgram = np.flipud(specgram)
# center bin
extent = (time[0] - halfbin_time, time[-1] + halfbin_time,
freq[0] - halfbin_freq, freq[-1] + halfbin_freq)
ax.imshow(specgram, interpolation="nearest", extent=extent,
cmap=cmap, zorder=zorder)
# set correct way of axis, whitespace before and after with window
# length
ax.axis('tight')
ax.set_xlim(0, end)
ax.grid(False)
if axes:
return ax
ax.set_xlabel('Time [s]')
ax.set_ylabel('Frequency [Hz]')
if title:
ax.set_title(title)
if not sphinx:
# ignoring all NumPy warnings during plot
temp = np.geterr()
np.seterr(all='ignore')
plt.draw()
np.seterr(**temp)
if outfile:
if fmt:
fig.savefig(outfile, format=fmt)
else:
fig.savefig(outfile)
elif show:
plt.show()
else:
return fig
def offset(self, offsets, **kwargs):
"""
DEFINITION:
Apply constant offsets to elements of the datastream
PARAMETERS:
Variables:
- offsets: (dict) Dictionary of offsets with keys to apply to
e.g. {'time': timedelta(hours=1), 'x': 4.2, 'f': -1.34242}
Important: Time offsets have to be timedelta objects
Kwargs:
- starttime: (Datetime object) Start time to apply offsets
- endtime : (Datetime object) End time to apply offsets
RETURNS:
- self: (DataStream) Stream with offsets applied.
EXAMPLE:
>>> data.offset({'x':7.5})
or
>>> data.offset({'x':7.5},starttime='2015-11-21 13:33:00',endtime='2015-11-23 12:22:00')
APPLICATION:
"""
endtime = kwargs.get('endtime')
starttime = kwargs.get('starttime')
comment = kwargs.get('comment')
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype =True
tcol = self.ndarray[0]
else:
tcol = self._get_column('time')
if not len(tcol) > 0:
logger.error("offset: No data found - aborting")
return self
stidx = 0
edidx = len(tcol)
if starttime:
st = date2num(self._testtime(starttime))
# get index number of first element >= starttime in timecol
stidxlst = np.where(tcol >= st)[0]
if not len(stidxlst) > 0:
return self ## stream ends before starttime
stidx = stidxlst[0]
if endtime:
ed = date2num(self._testtime(endtime))
# get index number of last element <= endtime in timecol
edidxlst = np.where(tcol <= ed)[0]
if not len(edidxlst) > 0:
return self ## stream begins after endtime
edidx = (edidxlst[-1]) + 1
if comment and not comment == '':
if len(self.ndarray[0]) > 0:
commpos = KEYLIST.index('comment')
flagpos = KEYLIST.index('flag')
commcol = self.ndarray[commpos]
else:
commcol = self._get_column('comment')
if not len(commcol) == len(tcol):
commcol = [''] * len(tcol)
if not len(self.ndarray[flagpos]) == len(tcol):
fllist = ['0' for el in FLAGKEYLIST]
fllist.append('-')
fl = ''.join(fllist)
self.ndarray[flagpos] = [fl] * len(tcol)
for idx,el in enumerate(commcol):
if idx >= stidx and idx <= edidx:
if not el == '':
commcol[idx] = comment + ', ' + el
else:
commcol[idx] = comment
else:
commcol[idx] = el
print("offset", len(commcol), len(tcol))
self.ndarray[commpos] = commcol
for key in offsets:
if key in KEYLIST:
if ndtype:
ind = KEYLIST.index(key)
val = self.ndarray[ind]
else:
val = self._get_column(key)
val = val[stidx:edidx]
if key == 'time':
secperday = 24*3600
try:
os = offsets[key].total_seconds()/secperday
except:
try:
exec('os = '+offsets[key]+'.total_seconds()/secperday')
except:
print("offset: error with time offset - check provided timedelta")
break
val = val + os
#print num2date(val[0]).replace(tzinfo=None)
#print num2date(val[0]).replace(tzinfo=None) + offsets[key]
#newval = [date2num(num2date(elem).replace(tzinfo=None) + offsets[key]) for elem in val]
logger.info('offset: Corrected time column by %s sec' % str(offsets[key]))
else:
val = val + offsets[key]
#newval = [elem + offsets[key] for elem in val]
logger.info('offset: Corrected column %s by %.3f' % (key, offsets[key]))
if ndtype:
self.ndarray[ind][stidx:edidx] = val
else:
nval = self._get_column(key) # repeated extraction of column - could be optimzed but usage of LineStruct will not be supported in future
nval[stidx:edidx] = val
self = self._put_column(nval, key)
else:
logger.error("offset: Key '%s' not in keylist." % key)
return self
def plot(self, keys=None, debugmode=None, **kwargs):
"""
DEFINITION:
Code for plotting one dataset. Consult mpplot.plot() and .plotStreams() for more
details.
EXAMPLE:
>>> cs1_data.plot(['f'],
outfile = 'frequenz.png',
specialdict = {'f':[44184.8,44185.8]},
plottitle = 'Station Graz - Feldstaerke 05.08.2013',
bgcolor='white')
"""
import magpy.mpplot as mp
if keys is None:
keys = []
mp.plot(self, variables=keys, **kwargs)
def powerspectrum(self, key, debugmode=None, outfile=None, fmt=None, axes=None, title=None,**kwargs):
"""
DEFINITION:
Calculating the power spectrum
following the numpy fft example
PARAMETERS:
Variables:
- key: (str) Key to analyse
Kwargs:
- axes: (matplotlib axes) plot into a given axes object instead of creating a new figure
- debugmode: (bool) Variable to show steps
- fmt: (str) Format of outfile, e.g. "png"
- outfile: (str) Filename to save plot to
- title: (str) Title to display on plot
- marks: (dict) add some text to the plot
- returndata: (bool) return freq and asd
- freqlevel: (float) print noise level at that frequency
RETURNS:
- plot: (matplotlib plot) A plot of the powerspectrum
EXAMPLE:
>>> data_stream.powerspectrum('x')
APPLICATION:
>>> from magpy.stream import read
1. Requires DataStream object:
>>> data_path = '/usr/lib/python2.7/magpy/examples/*'
>>> data = read(path_or_url=data_path,
starttime='2013-06-10 00:00:00',
endtime='2013-06-11 00:00:00')
2. Call for data stream:
>>> data.powerspectrum('f',
title='PSD of f', marks={'day':0.000011574},
outfile='ps.png')
"""
if debugmode:
print("Start powerspectrum at %s" % datetime.utcnow())
noshow = kwargs.get('noshow')
returndata = kwargs.get('returndata')
marks = kwargs.get('marks')
freqlevel = kwargs.get('freqlevel')
if noshow:
show = False
else:
show = True
dt = self.get_sampling_period()*24*3600
if not len(self) > 0:
logger.error("Powerspectrum: Stream of zero length -- aborting")
raise Exception("Can't analyse stream of zero length!")
t = np.asarray(self._get_column('time'))
val = np.asarray(self._get_column(key))
mint = np.min(t)
tnew, valnew = [],[]
nfft = int(nearestPow2(len(t)))
#print "NFFT:", nfft
if nfft > len(t):
nfft = int(nearestPow2(len(t) / 2.0))
#print "NFFT now:", nfft
for idx, elem in enumerate(val):
if not isnan(elem):
tnew.append((t[idx]-mint)*24*3600)
valnew.append(elem)
tnew = np.asarray(tnew)
valnew = np.asarray(valnew)
if debugmode:
print("Extracted data for powerspectrum at %s" % datetime.utcnow())
#freq = np.fft.fftfreq(tnew.shape[-1],dt)
#freq = freq[range(len(tnew)/2)] # one side frequency range
#freq = freq[1:]
#print "Maximum frequency:", max(freq)
#s = np.fft.fft(valnew)
#s = s[range(len(valnew)/2)] # one side data range
#s = s[1:]
#ps = np.real(s*np.conjugate(s))
if not axes:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes
psdm = mlab.psd(valnew, nfft, 1/dt)
asdm = np.sqrt(psdm[0])
freqm = psdm[1]
ax.loglog(freqm, asdm,'b-')
#print "Maximum frequency:", max(freqm)
if freqlevel:
val, idx = find_nearest(freqm, freqlevel)
print("Maximum Noise Level at %s Hz: %s" % (val,asdm[idx]))
if not marks:
pass
else:
for elem in marks:
ax.annotate(elem, xy=(marks[elem],min(asdm)),
xytext=(marks[elem],max(asdm)-(max(asdm)-min(asdm))*0.3),
bbox=dict(boxstyle="round", fc="0.95", alpha=0.6),
arrowprops=dict(arrowstyle="->",
shrinkA=0, shrinkB=1,
connectionstyle="angle,angleA=0,angleB=90,rad=10"))
try:
unit = self.header['unit-col-'+key]
except:
unit = 'unit'
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel(('Amplitude spectral density [%s/sqrt(Hz)]') % unit)
if title:
ax.set_title(title)
if debugmode:
print("Finished powerspectrum at %s" % datetime.utcnow())
if outfile:
if fmt:
fig.savefig(outfile, format=fmt)
else:
fig.savefig(outfile)
elif returndata:
return freqm, asdm
elif show:
plt.show()
else:
return fig
def randomdrop(self,percentage=None,fixed_indicies=None):
"""
DESCRIPTION:
Method to randomly drop one line from data. If percentage is
given, then lines according to this percentage are dropped.
This corresponds to a jackknife and d-jackknife respectively.
PARAMETER:
percentage (float) provide a percentage value to be dropped (1-99)
fixed_indicies (list) e.g. [0,1] provide a list of indices
which will not be dropped
RETURNS:
DataStream
APPLICATION:
>>> newstream = stream.randomdrop(percentage=10,fixed_indicies=[0,len(means.ndarray[0])-1])
"""
import random
def makeDrippingBucket(lst):
bucket = lst
if len(bucket) == 0:
return []
else:
random_index = random.randrange(0,len(bucket))
del bucket[random_index]
return bucket
if len(self.ndarray[0]) < 1:
return self
if percentage:
if percentage > 99:
percentage = 99
if percentage < 1:
percentage = 1
ns = self.copy()
if fixed_indicies:
# TODO assert list
pass
if not percentage:
newlen = len(ns.ndarray[0]) -1
else:
newlen = int(np.round(len(ns.ndarray[0])-len(ns.ndarray[0])*percentage/100.,0))
# Index list of stream
indexlst = [idx for idx, el in enumerate(ns.ndarray[0])]
#print len(indexlst), newlen
while len(indexlst) > newlen:
indexlst = makeDrippingBucket(indexlst)
if fixed_indicies:
for el in fixed_indicies:
if not el in indexlst:
indexlst.append(el)
#print "Here", len(indexlst)
for idx,ar in enumerate(ns.ndarray):
if len(ar) > 0:
#print ar, indexlst
newar = ar[indexlst]
ns.ndarray[idx] = newar
return ns
def remove(self, starttime=None, endtime=None):
"""
DEFINITION:
Removing dates inside of range between start- and endtime.
(Does the exact opposite of self.trim().)
PARAMETERS:
Variables:
- starttime: (datetime/str) Start of period to trim with
- endtime: (datetime/str) End of period to trim to
RETURNS:
- stream: (DataStream object) Stream with data between
starttime and endtime removed.
EXAMPLE:
>>> data = data.remove(starttime, endtime)
APPLICATION:
"""
if starttime and endtime:
if self._testtime(starttime) > self._testtime(endtime):
logger.error('Remove: Starttime (%s) is larger than endtime (%s).' % (starttime,endtime))
raise ValueError("Starttime is larger than endtime.")
logger.info('Remove: Started from %s to %s' % (starttime,endtime))
cutstream = DataStream()
cutstream.header = self.header
cutstream.ndarray = self.ndarray
starttime = self._testtime(starttime)
endtime = self._testtime(endtime)
stval = 0
if len(cutstream.ndarray[0]) > 0:
timearray = self.ndarray[0]
st = (np.abs(timearray.astype(float)-date2num(starttime))).argmin() - 1
ed = (np.abs(timearray.astype(float)-date2num(endtime))).argmin() + 1
if starttime < num2date(cutstream.ndarray[0][0]):
st = 0
if endtime > num2date(cutstream.ndarray[0][-1]):
ed = len(cutstream.ndarray[0])
dropind = [i for i in range(st,ed)]
for index,key in enumerate(KEYLIST):
if len(cutstream.ndarray[index])>0:
cutstream.ndarray[index] = np.delete(cutstream.ndarray[index], dropind)
else:
for idx, elem in enumerate(self):
newline = LineStruct()
if not isnan(elem.time):
newline.time = elem.time
if elem.time <= date2num(starttime) or elem.time > date2num(endtime):
for key in KEYLIST:
exec('newline.'+key+' = elem.'+key)
cutstream.add(newline)
return cutstream
def remove_flagged(self, **kwargs):
"""
DEFINITION:
remove flagged data from stream:
Flagged values are replaced by NAN values. Therefore the stream's length is not changed.
Flags are defined by integers (0 normal, 1 automatically marked, 2 to be kept,
3 to be removed, 4 special)
PARAMETERS:
Kwargs:
- keys: (list) keys (string list e.g. 'f') default=FLAGKEYLIST
- flaglist: (list) default=[1,3] defines integer codes to be removed
RETURNS:
- stream: (DataStream Object) Stream with flagged data replaced by NAN.
EXAMPLE:
>>> newstream = stream.remove_flagged()
APPLICATION:
"""
# Defaults:
flaglist = kwargs.get('flaglist')
keys = kwargs.get('keys')
if not flaglist:
flaglist = [1,3]
if not keys:
keys = FLAGKEYLIST
# Converting elements of flaglist to strings
flaglist = [str(fl) for fl in flaglist]
array = self.ndarray
ndtype = False
if len(self.ndarray[0]) > 0:
flagind = KEYLIST.index('flag')
commind = KEYLIST.index('comment')
ndtype = True
for key in keys:
pos = KEYLIST.index(key)
liste = []
emptyelem = LineStruct()
if ndtype:
# get indices of all non-empty flag contents
indlst = [i for i,el in enumerate(self.ndarray[flagind]) if not el in ['','-']]
for i in indlst:
try:
#if len(array[pos]) > 0:
flagls = list(self.ndarray[flagind][i])
flag = flagls[pos]
if flag in flaglist:
array[pos][i] = float("nan")
except:
#print("stream remove_flagged: index error: indlst {}, pos {}, length flag colum {}".format(len(indlst), pos, len(self.ndarray[flagind])))
pass
liste = [LineStruct()]
else:
for elem in self:
fllst = list(elem.flag)
try: # test whether useful flag is present: flaglst length changed during the program development
flag = int(fllst[pos])
except:
flag = 0
if not flag in flaglist:
liste.append(elem)
else:
setattr(elem, key, float("nan"))
#exec('elem.'+key+' = float("nan")')
liste.append(elem)
#liste = [elem for elem in self if not elem.flag[pos] in flaglist]
if ndtype:
#-> Necessary to consider shape (e.g.BLV data)
newar = [np.asarray([]) for el in KEYLIST]
for idx,el in enumerate(array):
if idx == flagind:
pass
elif idx == commind:
pass
else:
newar[idx] = array[idx]
else:
newar = list(self.ndarray)
# Drop contents of flag and comment column -> didn't work for BLV data because of shape
# changed for 0.3.99
#array[flagind] = np.asarray([])
#array[commind] = np.asarray([])
return DataStream(liste, self.header,np.asarray(newar,dtype=object))
def remove_outlier(self, **kwargs):
"""
DEFINITION:
Flags outliers in data, uses quartiles.
Notes: Position of flag in flagstring:
f (intensity): pos 0
x,y,z (vector): pos 1
other (vector): pos 2
Position of flag in flagstring
x : pos 0
y : pos 1
z : pos 2
f : pos 3
t1 : pos 4
t2 : pos 5
var1 : pos 6
var2: pos 7
Coding : 0 take, 1 remove, 2 force take, 3 force remove
Example:
0000000, 0001000, etc
012 = take f, automatically removed v, and force use of other
300 = force remove f, take v, and take other
PARAMETERS:
Variables:
- None.
Kwargs:
- keys: (list) List of keys to evaluate. Default=['f']
- threshold: (float) Determines threshold for outliers.
1.5 = standard
5 = keeps storm onsets in
4 = Default, as a compromise.
- timerange: (timedelta Object) Time range. Default = timedelta(hours=1)
- markall : (bool) mark all keys of an outlier record, unless force flags have already been applied
- stdout: prints removed values to stdout
RETURNS:
- stream: (DataStream Object) Stream with flagged data.
EXAMPLE:
>>> stream.remove_outlier(keys=['x','y','z'], threshold=2)
APPLICATION:
"""
# Defaults:
timerange = kwargs.get('timerange')
threshold = kwargs.get('threshold')
keys = kwargs.get('keys')
markall = kwargs.get('markall')
stdout = kwargs.get('stdout')
if not timerange:
timerange = timedelta(hours=1)
if not keys:
keys = ['f']
if not threshold:
threshold = 4.0
if not stdout:
stdout = False
# Position of flag in flagstring
# f (intensity): pos 0
# x,y,z (vector): pos 1
# other (vector): pos 2
logger.info('remove_outlier: Starting outlier removal.')
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype = True
arraytime = self.ndarray[0]
flagind = KEYLIST.index('flag')
commentind = KEYLIST.index('comment')
print ("Found ndarray - using flag_outlier instead")
return self.flag_outlier(**kwargs)
elif len(self) > 1:
arraytime = self._get_column('time')
else:
logger.warning('remove_outlier: No data - Stopping outlier removal.')
return self
# Working non-destructive
restream = self.copy()
# Start here with for key in keys:
for key in keys:
flagpos = FLAGKEYLIST.index(key)
st,et = self._find_t_limits()
st = date2num(st)
et = date2num(et)
at = date2num((num2date(st).replace(tzinfo=None)) + timerange)
incrt = at-st
newst = DataStream()
while st < et:
tmpar, idxst = find_nearest(arraytime,st)
tmpar, idxat = find_nearest(arraytime,at)
if idxat == len(arraytime)-1:
idxat = len(arraytime)
st = at
at += incrt
if ndtype:
ind = KEYLIST.index(key)
lstpart = self.ndarray[ind][idxst:idxat].astype(float)
print(lstpart)
print(np.isnan(lstpart))
selcol = lstpart[~np.isnan(lstpart)]
else:
lstpart = self[idxst:idxat]
# changed at 28.08.2014
#selcol = [eval('row.'+key) for row in lstpart]
selcol = [eval('row.'+key) for row in lstpart if not isnan(eval('row.'+key))]
try:
q1 = stats.scoreatpercentile(selcol,25)
q3 = stats.scoreatpercentile(selcol,75)
iqd = q3-q1
md = np.median(selcol)
whisker = threshold*iqd
except:
try:
md = np.median(selcol)
whisker = md*0.005
except:
logger.warning("remove_outlier: Eliminate outliers produced a problem: please check.")
pass
if ndtype:
# XXX DOES NOT WORK, TODO
for i in range(idxst,idxat):
if row.flag == '' or row.flag == '0000000000000000-' or row.flag == '-' or row.flag == '-0000000000000000':
row.flag = '-' * len(FLAGKEYLIST)
if row.comment == '-':
row.comment = ''
else:
for elem in lstpart:
row = LineStruct()
row = elem
if row.flag == '' or row.flag == '0000000000000000-' or row.flag == '-' or row.flag == '-0000000000000000':
#row.flag = '0000000000000000-'
row.flag = '-----------------'
if row.comment == '-':
row.comment = ''
if isNumber(row.flag): # if somehow the flag has been transfered to a number - create a string again
num = str(int(row.flag))[:-1]
row.flag = num+'-'
if not md-whisker < eval('elem.'+key) < md+whisker:
fllist = list(row.flag)
#print "Found", key
if len(fllist) >= flagpos:
fllist = np.asarray(fllist, dtype=object)
if not fllist[flagpos] in [1,2,3,4] :
if markall:
#print "mark"
fl = []
for j,f in enumerate(FLAGKEYLIST):
if f in keys:
fl.append('1')
else:
fl.append('-')
for idx, el in enumerate(fllist):
if el in [1,2,3,4]:
fl[idx] = el
fllist = fl
fllist[flagpos] = '1'
row.flag=''.join(fllist)
row.comment = "aof - threshold: %s, window: %s sec" % (str(threshold), str(timerange.total_seconds()))
#print row.flag, key
if not isnan(eval('elem.'+key)):
infoline = "remove_outlier: at %s - removed %s (= %f)" % (str(num2date(elem.time)),key, eval('elem.'+key))
logger.info(infoline)
if stdout:
print(infoline)
else:
fllist = list(row.flag)
if len(fllist) >= flagpos:
if row.flag == '':
pass
elif fllist[flagpos] == '-':
testlst = [el for el in fllist if el in ['0','1','2','3','4']]
if not len(testlst) > 0:
row.flag = ''
else:
pass
newst.add(row)
logger.info('remove_outlier: Outlier removal finished.')
if ndtype:
return restream
else:
return DataStream(newst, self.header, self.ndarray)
def resample(self, keys, debugmode=False,**kwargs):
"""
DEFINITION:
Uses scipy.interpolate.interp1d to resample the stream to the requested period.
Two methods:
fast: only valid if the time stamps at which resampling is conducted are part of the
original time series, e.g. original = seconds (58,59,0,1,2) resampled at 0
slow: general method if the resampling time stamps are not contained in the
original series (e.g. 58.23, 59.24, 0.23,...) resampled at 0
PARAMETERS:
Variables:
- keys: (list) keys to be resampled.
Kwargs:
- period: (float) sampling period in seconds, e.g. 5s (0.2 Hz).
- fast: (bool) use fast approximation
- offset: (timedelta) shift the resampling time steps by the given offset
RETURNS:
- stream: (DataStream object) Stream containing resampled data.
EXAMPLE:
>>> resampled_stream = pos_data.resample(['f'],period=1)
APPLICATION:
"""
period = kwargs.get('period')
fast = kwargs.get('fast')
offset = kwargs.get('offset')
if not period:
period = 60.
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype = True
sp = self.samplingrate()
logger.info("resample: Resampling stream of sampling period %s to period %s." % (sp,period))
logger.info("resample: Resampling keys %s " % (','.join(keys)))
# Determine the minimum time
t_min,t_max = self._find_t_limits()
t_start = t_min
if offset:
t_min = ceil_dt(t_min,period)
if t_min - offset > t_start:
t_min = t_min -offset
else:
t_min = t_min +offset
startperiod, line = self.findtime(t_min)
else:
t_min = ceil_dt(t_min,period)
startperiod, line = self.findtime(t_min)
if fast: # To be done if timesteps are at period timesteps
try:
logger.info("resample: Using fast algorithm.")
si = timedelta(seconds=sp)
sampling_period = si.seconds
if period <= sampling_period:
logger.warning("resample: Resampling period must be larger or equal than original sampling period.")
return self
if debugmode:
print ("Trying fast algorythm")
print ("Projected period and Sampling period:", period, sampling_period)
if not line == [] or ndtype: # or (ndtype and not line == []):
xx = int(np.round(period/sampling_period))
if ndtype:
newstream = DataStream([LineStruct()],{},np.asarray([]))
newstream.header = self.header
lst = []
for ind,elem in enumerate(self.ndarray):
if debugmode:
print ("dealing with column", ind, elem)
if len(elem) > 0:
lst.append(np.asarray(elem[startperiod::xx]))
else:
lst.append(np.asarray([]))
newstream.ndarray = np.asarray(lst)
else:
newstream = DataStream([],{},np.asarray([[] for el in KEYLIST]))
newstream.header = self.header
for line in self[startperiod::xx]:
newstream.add(line)
newstream.header['DataSamplingRate'] = str(period) + ' sec'
return newstream
logger.warning("resample: Fast resampling failed - switching to slow mode")
except:
logger.warning("resample: Fast resampling failed - switching to slow mode")
pass
# This is done if timesteps are not at period intervals
# -----------------------------------------------------
if debugmode:
print ("General -slow- resampling")
# Create a list containing time steps
#t_max = num2date(self._get_max('time'))
t_list = []
time = t_min
while time <= t_max:
t_list.append(date2num(time))
time = time + timedelta(seconds=period)
# Compare length of new time list with old timelist
# multiplicator is used to check whether a nan value sits at the corresponding position of the original data - an approximate but sufficient treatment of missing values
if not len(t_list) > 0:
return DataStream()
multiplicator = float(self.length()[0])/float(len(t_list))
logger.info("resample a: {},{},{}".format(float(self.length()[0]), float(len(t_list)),startperiod))
#print ("Times:", self.ndarray[0][0],self.ndarray[0][-1],t_list[0],t_list[-1])
stwithnan = self.copy()
# What is this good for (leon 17.04.2019)???
tmp = self.trim(starttime=736011.58337400458,endtime=736011.59721099539)
logger.info("resample test: {}".format(tmp.ndarray))
#tcol = stwithnan.ndarray[0]
res_stream = DataStream()
res_stream.header = self.header
array=[np.asarray([]) for elem in KEYLIST]
if ndtype:
array[0] = np.asarray(t_list)
res_stream.add(LineStruct())
else:
for item in t_list:
row = LineStruct()
row.time = item
res_stream.add(row)
for key in keys:
if debugmode:
print ("Resampling:", key)
if key not in KEYLIST[1:16]:
logger.warning("resample: Key %s not supported!" % key)
index = KEYLIST.index(key)
try:
#print (len(self._get_column(key)), multiplicator)
int_data = self.interpol([key],kind='linear')#'cubic')
int_func = int_data[0]['f'+key]
int_min = int_data[1]
int_max = int_data[2]
key_list = []
for ind, item in enumerate(t_list):
# normalized time range between 0 and 1
functime = (item - int_min)/(int_max - int_min)
# check whether original value is np.nan (as interpol method does not account for that)
# exact but slowly: idx = np.abs(tcol-item).argmin()
# orgval = stwithnan.ndarray[index][idx]
# reduce the index range as below
if ndtype:
if int(ind*multiplicator) <= len(self.ndarray[index]):
#orgval = self.ndarray[index][int(ind*multiplicator)]
estimate = False
# Please note: two techniques are available here (exact and estimate)
# Speed difference for an example data set of 500000 points:
# Exact: 7.55 sec (including one minute filter)
# Estimate: 7.15 sec
if estimate:
orgval = stwithnan.ndarray[index][int(ind*multiplicator+startperiod)] # + offset
else:
# Exact solution:
mv = int(ind*multiplicator+startperiod)
stv = mv-int(20*multiplicator)
if stv < 0:
stv = 0
etv = mv+int(20*multiplicator)
if etv >= len(self.ndarray[index]):
etv = len(self.ndarray[index])
subar = stwithnan.ndarray[0][stv:etv]
idx = (np.abs(subar-item)).argmin()
#subar = stwithnan.ndarray[index][stv:etv]
orgval = stwithnan.ndarray[index][stv+idx] # + offset
#if item > 736011.58337400458 and item < 736011.59721099539:
# print ("Found", item, stv+idx, idx, orgval)
#if np.isnan(orgval):
# print (stv+idx, stv, etv)
else:
print("Check Resampling method")
orgval = 1.0
else:
orgval = getattr(stwithnan[int(ind*multiplicator+startperiod)],key)
tempval = np.nan
# Not a safe fix, but appears to cover decimal leftover problems
# (e.g. functime = 1.0000000014, which raises an error)
if functime > 1.0:
functime = 1.0
if not isnan(orgval):
tempval = int_func(functime)
key_list.append(float(tempval))
if ndtype:
array[index] = np.asarray(key_list)
else:
res_stream._put_column(key_list,key)
except:
logger.error("resample: Error interpolating stream. Stream either too large or no data for selected key")
res_stream.ndarray = np.asarray(array,dtype=object)
logger.info("resample: Data resampling complete.")
#return DataStream(res_stream,self.headers)
res_stream.header['DataSamplingRate'] = str(period) + ' sec'
return res_stream
def rotation(self,**kwargs):
"""
DEFINITION:
Rotation matrix for rotating x,y,z to new coordinate system xs,ys,zs using angles alpha and beta
PARAMETERS:
Variables:
Kwargs:
- alpha: (float) The horizontal rotation in degrees
- beta: (float) The vertical rotation in degrees
- keys: (list) provide an alternative vector to rotate - default is ['x','y','z']
keys are only supported from 1.0 onwards (ndarray)
RETURNS:
- self: (DataStream) The rotated stream
EXAMPLE:
>>> data.rotation(alpha=2.74)
APPLICATION:
"""
unit = kwargs.get('unit')
alpha = kwargs.get('alpha')
beta = kwargs.get('beta')
keys = kwargs.get('keys')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
if not alpha:
alpha = 0.
if not beta:
beta = 0.
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
logger.error('rotation: provided keylist needs to have three components.')
return self
logger.info('rotation: Applying rotation matrix.')
"""
a[0][0] = cos(p)*cos(b);
a[0][1] = -sin(b);
a[0][2] = sin(p)*cos(b);
a[1][0] = cos(p)*sin(b);
a[1][1] = cos(b);
a[1][2] = sin(p)*sin(b);
a[2][0] = -sin(p);
a[2][1] = 0.0;
a[2][2] = cos(p);
xyz.l = ortho.l*a[0][0]+ortho.m*a[0][1]+ortho.n*a[0][2];
xyz.m = ortho.l*a[1][0]+ortho.m*a[1][1]+ortho.n*a[1][2];
xyz.n = ortho.l*a[2][0]+ortho.m*a[2][1]+ortho.n*a[2][2];
"""
ind1 = KEYLIST.index(keys[0])
ind2 = KEYLIST.index(keys[1])
ind3 = KEYLIST.index(keys[2])
if len(self.ndarray[0]) > 0:
if len(self.ndarray[ind1]) > 0 and len(self.ndarray[ind2]) > 0 and len(self.ndarray[ind3]) > 0:
ra = np.pi*alpha/(180.*ang_fac)
rb = np.pi*beta/(180.*ang_fac)
xar = self.ndarray[ind1].astype(float)*np.cos(rb)*np.cos(ra)-self.ndarray[ind2].astype(float)*np.sin(ra)+self.ndarray[ind3].astype(float)*np.sin(rb)*np.cos(ra)
yar = self.ndarray[ind1].astype(float)*np.cos(rb)*np.sin(ra)+self.ndarray[ind2].astype(float)*np.cos(ra)+self.ndarray[ind3].astype(float)*np.sin(rb)*np.sin(ra)
zar = -self.ndarray[ind1].astype(float)*np.sin(rb)+self.ndarray[ind3].astype(float)*np.cos(rb)
self.ndarray[ind1] = xar
self.ndarray[ind2] = yar
self.ndarray[ind3] = zar
"""
for elem in self:
ra = np.pi*alpha/(180.*ang_fac)
rb = np.pi*beta/(180.*ang_fac)
# Testing the conservation of f ##### Error corrected in May 2014 by leon
#fbefore = sqrt(elem.x**2+elem.y**2+elem.z**2)
xs = elem.x*np.cos(rb)*np.cos(ra)-elem.y*np.sin(ra)+elem.z*np.sin(rb)*np.cos(ra)
ys = elem.x*np.cos(rb)*np.sin(ra)+elem.y*np.cos(ra)+elem.z*np.sin(rb)*np.sin(ra)
zs = -elem.x*np.sin(rb)+elem.z*np.cos(rb)
#fafter = sqrt(xs**2+ys**2+zs**2)
#print "f:", fbefore,fafter,fbefore-fafter
elem.x = xs
elem.y = ys
elem.z = zs
"""
logger.info('rotation: Finished reorientation.')
return self
def scale_correction(self, keys, scales, **kwargs):
"""
DEFINITION:
multiplies the selected keys by the given scale values
PARAMETERS:
Kwargs:
- offset: (array) containing constant offsets for the given keys
RETURNS:
- DataStream
EXAMPLES:
>>> stream = stream.scale_correction(['x','y','z'],[1,0.988,1])
"""
print("Function will be removed - use e.g. self.multiply({'y': 0.988}) instead")
# Take care: if there is only 0.1 nT accuracy then there will be a similar noise in the deltaF signal
offset = kwargs.get('offset')
if not offset:
offset = [0]*len(keys)
else:
if not len(offset) == len(keys):
logger.error('scale_correction: offset with wrong dimension given - needs to have the same length as given keys - returning stream without changes')
return self
try:
assert len(self) > 0
except:
logger.error('scale_correction: empty stream - aborting')
return self
offsetlst = []
for key in KEYLIST:
if key in keys:
pos = keys.index(key)
offsetlst.append(offset[pos])
else:
offsetlst.append(0.0)
logger.info('scale_correction: --- Scale correction started at %s ' % str(datetime.now()))
for elem in self:
for i,key in enumerate(keys):
exec('elem.'+key+' = (elem.'+key+'+offset[i]) * scales[i]')
scalelst = []
for key in KEYLIST:
if key in keys:
pos = keys.index(key)
scalelst.append(scales[pos])
else:
scalelst.append(1.)
#print '_'.join(map(str,offsetlst)), scalelst
self.header['DataScaleValues'] = '_'.join(map(str,scalelst))
self.header['DataOffsets'] = '_'.join(map(str,offsetlst))
logger.info('scale_correction: --- Scale correction finished at %s ' % str(datetime.now()))
return self
def selectkeys(self, keys, **kwargs):
"""
DEFINITION:
Take data stream and remove all except the provided keys from ndarray
RETURNS:
- self: (DataStream) with ndarray limited to keys
EXAMPLE:
>>> keydata = fulldata.selectkeys(['x','y','z'])
APPLICATION:
"""
noflags = kwargs.get('noflags')
stream = self.copy()
if not 'time' in keys:
ti = ['time']
ti.extend(keys)
keys = ti
if len(stream.ndarray[0]) > 0:
# Check for flagging and comment column
if not noflags:
flagidx = KEYLIST.index('flag')
commentidx = KEYLIST.index('comment')
if len(stream.ndarray[flagidx]) > 0:
keys.append('flag')
if len(stream.ndarray[commentidx]) > 0:
keys.append('comment')
# Remove all missing
for idx, elem in enumerate(stream.ndarray):
if not KEYLIST[idx] in keys:
stream.ndarray[idx] = np.asarray([])
return stream
else:
return stream
def smooth(self, keys=None, **kwargs):
"""
DEFINITION:
Smooth the data using a window with requested size.
(taken from Cookbook/Signal Smooth)
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) at both ends so that transient parts are minimized
at the beginning and end of the output signal.
PARAMETERS:
Variables:
- keys: (list) List of keys to smooth
Kwargs:
- window_len: (int,odd) dimension of the smoothing window
- window: (str) the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'. A flat window will produce a moving average smoothing.
(See also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter)
RETURNS:
- self: (DataStream) The smoothed signal
EXAMPLE:
>>> nice_data = bad_data.smooth(['x','y','z'])
or
>>> t=linspace(-2,2,0.1)
>>> x=sin(t)+randn(len(t))*0.1
>>> y=smooth(x)
APPLICATION:
TODO:
the window parameter could be the window itself if an array instead of a string
"""
# Defaults:
window_len = kwargs.get('window_len')
window = kwargs.get('window')
if not window_len:
window_len = 11
if not window:
window='hanning'
if not keys:
keys=self._get_key_headers(numerical=True)
window_len = int(window_len)
ndtype = False
if len(self.ndarray[0])>0:
ndtype = True
logger.info('smooth: Start smoothing (%s window, width %d) at %s' % (window, window_len, str(datetime.now())))
for key in keys:
if key in NUMKEYLIST:
if ndtype:
ind = KEYLIST.index(key)
x = self.ndarray[ind]
else:
x = self._get_column(key)
x = maskNAN(x)
if x.ndim != 1:
logger.error("smooth: Only accepts 1 dimensional arrays.")
if x.size < window_len:
print(x.size, window_len)
logger.error("smooth: Input vector needs to be bigger than window size.")
if window_len<3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
logger.error("smooth: Window is none of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
logger.debug("smooth: You entered string %s as a window." % window)
s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w = getattr(np, window)(window_len)
y=np.convolve(w/w.sum(),s,mode='valid')
if ndtype:
self.ndarray[ind] = np.asarray(y[(int(window_len/2)):(len(x)+int(window_len/2))])
else:
self._put_column(y[(int(window_len/2)):(len(x)+int(window_len/2))],key)
else:
logger.error("Column key %s not valid." % key)
logger.info('smooth: Finished smoothing at %s' % (str(datetime.now())))
return self
def spectrogram(self, keys, per_lap=0.9, wlen=None, log=False,
outfile=None, fmt=None, axes=None, dbscale=False,
mult=8.0, cmap=None, zorder=None, title=None, show=True,
sphinx=False, clip=[0.0, 1.0], **kwargs):
"""
Creates a spectrogram plot of selected keys.
Parameter description at function obspyspectrogram
keywords:
samp_rate_multiplicator: (int) factor converting the day-based sampling period to seconds; default is 24*3600, so that frequencies are given in Hz
"""
samp_rate_multiplicator = kwargs.get('samp_rate_multiplicator')
if not samp_rate_multiplicator:
samp_rate_multiplicator = 24*3600
t = self._get_column('time')
if not len(t) > 0:
logger.error('Spectrogram: stream of zero length -- aborting')
return
for key in keys:
val = self._get_column(key)
val = maskNAN(val)
dt = self.get_sampling_period()*(samp_rate_multiplicator)
Fs = float(1.0/dt)
self.obspyspectrogram(val,Fs, per_lap=per_lap, wlen=wlen, log=log,
outfile=outfile, fmt=fmt, axes=axes, dbscale=dbscale,
mult=mult, cmap=cmap, zorder=zorder, title=title, show=show,
sphinx=sphinx, clip=clip)
def steadyrise(self, key, timewindow, **kwargs):
"""
DEFINITION:
Method determines the absolute increase within a data column
and a selected time window
neglecting any resets and decreasing trends
- used for analyzing some rain sensors
PARAMETERS:
key: (key) column on which the process is performed
timewindow: (timedelta) define the window e.g. timedelta(minutes=15)
Kwargs:
sensitivitylevel: (float) define a difference which two successive
points need to exceed to be used
(useful if you have some numeric noise)
RETURNS:
- column: (array) column with the length of the stream,
containing timewindow blocks of stacked data.
EXAMPLE:
>>> col = stream.steadyrise('t1', timedelta(minutes=60),sensitivitylevel=0.002)
"""
sensitivitylevel = kwargs.get('sensitivitylevel')
prevval = 9999999999999.0
stacked = 0.0
count = 0
rescol = []
testcol = []
ndtype = False
if len(self.ndarray[0]) > 0:
ndtype = True
ind = KEYLIST.index(key)
if ndtype and len(self.ndarray[ind]) > 0:
startt = num2date(np.min(self.ndarray[0]))
for idx,val in enumerate(self.ndarray[ind]):
if num2date(self.ndarray[0][idx]) < startt+timewindow:
if prevval < val:
diff = val-prevval
if not sensitivitylevel:
stacked += val-prevval
elif diff > sensitivitylevel:
stacked += val-prevval
count += 1
else:
for i in range(count+1):
rescol.append(stacked)
count = 0
# now put that results back to a column
startt = startt+timewindow
stacked = 0.0
prevval = val
elif not ndtype:
startt = num2date(self[0].time)
for elem in self:
testcol.append(elem)
if num2date(elem.time) < startt+timewindow:
val = eval('elem.'+key)
if prevval < val:
diff = val-prevval
if not sensitivitylevel:
stacked += val-prevval
elif diff > sensitivitylevel:
stacked += val-prevval
count += 1
else:
for i in range(count+1):
rescol.append(stacked)
count = 0
# now put that results back to a column
startt = startt+timewindow
val = eval('elem.'+key)
stacked = 0.0
prevval = val
else:
print("steadyrise: no data found in selected column %s" % key)
return np.asarray([])
# Finally fill the end
for i in range(count):
rescol.append(stacked)
if not len(rescol) == len(self) and not len(rescol) == len(self.ndarray[0]) :
logger.error('steadyrise: An error leading to unequal lengths has been encountered')
return []
return np.asarray(rescol)
def stereoplot(self, **kwargs):
"""
DEFINITION:
plots dec and inc values in stereographic projection
will abort if no idff typ is provided
full circles denote positive inclinations, open circles negative ones
PARAMETERS:
variable:
- stream (DataStream) a magpy datastream object
kwargs:
- focus: (string) defines the plot area - can be either:
all - -90 to 90 deg inc, 360 deg dec (default)
q1 - first quadrant
q2 - second quadrant
q3 - third quadrant
q4 - fourth quadrant
data - focus on data (if angular spread is less than 10 deg)
- groups (KEY) - key of keylist which defines color of points
(e.g. ('str2') in absolutes to select
different colors for different instruments
- legend (bool) - draws legend only if groups is given - default True
- legendposition (string) - draws the legend at chosen position (e.g. "upper right", "lower center") - default is "lower left"
- labellimit (integer)- maximum length of label in legend
- noshow: (bool) don't call show at the end, just returns figure handle
- outfile: (string) to save the figure, if path is not existing it will be created
- gridcolor: (string) Define grid color e.g. '0.5' greyscale, 'r' red, etc
- savedpi: (integer) resolution
- figure: (bool) True for GUI
REQUIRES:
- package operator for color selection
RETURNS:
- plot
ToDo:
- add alpha 95 calc
EXAMPLE:
>>> stream.stereoplot(focus='data',groups='str2')
"""
focus = kwargs.get('focus')
groups = kwargs.get('groups')
bgcolor = kwargs.get('bgcolor')
colorlist = kwargs.get('colorlist')
outfile = kwargs.get('outfile')
savedpi = kwargs.get('savedpi')
gridinccolor = kwargs.get('gridinccolor')
griddeccolor = kwargs.get('griddeccolor')
noshow = kwargs.get('noshow')
legend = kwargs.get('legend')
legendposition = kwargs.get('legendposition')
labellimit = kwargs.get('labellimit')
figure = kwargs.get('figure')
fmt = kwargs.get('fmt')   # image format used when saving to outfile (referenced below)
if not colorlist:
colorlist = ['b','r','g','c','m','y','k']
if not bgcolor:
bgcolor = '#d5de9c'
if not griddeccolor:
griddeccolor = '#316931'
if not gridinccolor:
gridinccolor = '#316931'
if not savedpi:
savedpi = 80
if not focus:
focus = 'all'
if not legend:
legend = 'True'
if not labellimit:
labellimit = 11
if not legendposition:
legendposition = "lower left"
if not self[0].typ == 'idff':
logger.error('Stereoplot: you need to provide idf data')
return
inc = self._get_column('x')
dec = self._get_column('y')
col = ['']
if groups:
sel = self._get_column(groups)
col = list(set(list(sel)))
if len(col) > 7:
col = col[:7]
if not len(dec) == len(inc):
logger.error('Stereoplot: check you data file - unequal inc and dec data?')
return
if not figure:
fig = plt.figure()
else:
fig = figure
ax = plt.gca()
ax.cla() # clear things for fresh plot
ax.set_aspect('equal')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_yticks([])
# Define koordinates:
basic1=plt.Circle((0,0),90,color=bgcolor,fill=True)
basic1a=plt.Circle((0,0),90,color=gridinccolor,fill=False)
basic2=plt.Circle((0,0),30,color=gridinccolor,fill=False,linestyle='dotted')
basic3=plt.Circle((0,0),60,color=gridinccolor,fill=False,linestyle='dotted')
basic4=plt.Line2D([0,0],[-90,90],color=griddeccolor,linestyle='dashed')
basic5=plt.Line2D([-90,90],[0,0],color=griddeccolor,linestyle='dashed')
fig.gca().add_artist(basic1)
fig.gca().add_artist(basic1a)
fig.gca().add_artist(basic2)
fig.gca().add_artist(basic3)
fig.gca().add_artist(basic4)
fig.gca().add_artist(basic5)
for j in range(len(col)):
color = colorlist[j]
xpos,ypos,xneg,yneg,xabs,y = [],[],[],[],[],[]
for i,el in enumerate(inc):
if groups:
if sel[i] == col[j]:
coinc = 90-np.abs(el)
sindec = np.sin(np.pi/180*dec[i])
cosdec = np.cos(np.pi/180*dec[i])
xabs.append(coinc*sindec)
y.append(coinc*cosdec)
if el < 0:
xneg.append(coinc*sindec)
yneg.append(coinc*cosdec)
else:
xpos.append(coinc*sindec)
ypos.append(coinc*cosdec)
else:
coinc = 90-np.abs(el)
sindec = np.sin(np.pi/180*dec[i])
cosdec = np.cos(np.pi/180*dec[i])
xabs.append(coinc*sindec)
y.append(coinc*cosdec)
if el < 0:
xneg.append(coinc*sindec)
yneg.append(coinc*cosdec)
else:
xpos.append(coinc*sindec)
ypos.append(coinc*cosdec)
xmax = np.ceil(max(xabs))
xmin = np.floor(min(xabs))
xdif = xmax-xmin
ymax = np.ceil(max(y))
ymin = np.floor(min(y))
ydif = ymax-ymin
maxdif = max([xdif,ydif])
mindec = np.floor(min(dec))
maxdec = np.ceil(max(dec))
mininc = np.floor(min(np.abs(inc)))
maxinc = np.ceil(max(np.abs(inc)))
if focus == 'data' and maxdif <= 10:
# decs
startdec = mindec
decline,inclst = [],[]
startinc = mininc
incline = []
while startdec <= maxdec:
xl = 90*np.sin(np.pi/180*startdec)
yl = 90*np.cos(np.pi/180*startdec)
decline.append([xl,yl,startdec])
startdec = startdec+1
while startinc <= maxinc:
inclst.append(90-np.abs(startinc))
startinc = startinc+1
if focus == 'all':
ax.set_xlim((-90,90))
ax.set_ylim((-90,90))
if focus == 'q1':
ax.set_xlim((0,90))
ax.set_ylim((0,90))
if focus == 'q2':
ax.set_xlim((-90,0))
ax.set_ylim((0,90))
if focus == 'q3':
ax.set_xlim((-90,0))
ax.set_ylim((-90,0))
if focus == 'q4':
ax.set_xlim((0,90))
ax.set_ylim((-90,0))
if focus == 'data':
ax.set_xlim((xmin,xmax))
ax.set_ylim((ymin,ymax))
#ax.annotate('Test', xy=(1.2, 25.2))
ax.plot(xpos,ypos,'o',color=color, label=col[j][:labellimit])
ax.plot(xneg,yneg,'o',color='white')
ax.annotate('60', xy=(0, 30))
ax.annotate('30', xy=(0, 60))
ax.annotate('0', xy=(0, 90))
ax.annotate('90', xy=(90, 0))
ax.annotate('180', xy=(0, -90))
ax.annotate('270', xy=(-90, 0))
if focus == 'data' and maxdif <= 10:
for elem in decline:
pline = plt.Line2D([0,elem[0]],[0,elem[1]],color=griddeccolor,linestyle='dotted')
xa = elem[0]/elem[1]*((ymax - ymin)/2+ymin)
ya = (ymax - ymin)/2 + ymin
annotext = "D:%i" % int(elem[2])
ax.annotate(annotext, xy=(xa,ya))
fig.gca().add_artist(pline)
for elem in inclst:
pcirc = plt.Circle((0,0),elem,color=gridinccolor,fill=False,linestyle='dotted')
xa = (xmax-xmin)/2 + xmin
ya = sqrt((elem*elem)-(xa*xa))
annotext = "I:%i" % int(90-elem)
ax.annotate(annotext, xy=(xa,ya))
fig.gca().add_artist(pcirc)
if groups and legend:
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels),key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=legendposition)
# 5. SAVE TO FILE (or show)
if figure:
return ax
if outfile:
path = os.path.split(outfile)[0]
if not path == '':
if not os.path.exists(path):
os.makedirs(path)
if fmt:
fig.savefig(outfile, format=fmt, dpi=savedpi)
else:
fig.savefig(outfile, dpi=savedpi)
elif noshow:
return fig
else:
plt.show()
def trim(self, starttime=None, endtime=None, newway=False):
"""
DEFINITION:
Removing dates outside of range between start- and endtime.
Returned stream has range starttime <= range < endtime.
PARAMETERS:
Variables:
- starttime: (datetime/str) Start of period to trim with
- endtime: (datetime/str) End of period to trim to
Kwargs:
- newway: (bool) Testing method for non-destructive trimming
RETURNS:
- stream: (DataStream object) Trimmed stream
EXAMPLE:
>>> data = data.trim(starttime, endtime)
APPLICATION:
"""
if starttime and endtime:
if self._testtime(starttime) > self._testtime(endtime):
logger.error('Trim: Starttime (%s) is larger than endtime (%s).' % (starttime,endtime))
raise ValueError("Starttime is larger than endtime.")
logger.info('Trim: Started from %s to %s' % (starttime,endtime))
ndtype = False
if self.ndarray[0].size > 0:
ndtype = True
self.container = [LineStruct()]
#-ndarrray---------------------------------------
if not newway:
newarray = list(self.ndarray) # Converting array to list - better for append and other item function (because its not type sensitive)
else:
newstream = self.copy()
newarray = list(newstream.ndarray)
if starttime:
starttime = self._testtime(starttime)
if newarray[0].size > 0: # time column present
idx = (np.abs(newarray[0].astype(float)-date2num(starttime))).argmin()
# Trim should start at point >= starttime, so check:
if newarray[0][idx] < date2num(starttime):
idx += 1
for i in range(len(newarray)):
if len(newarray[i]) >= idx:
newarray[i] = newarray[i][idx:]
if endtime:
endtime = self._testtime(endtime)
if newarray[0].size > 0: # time column present
idx = 1 + (np.abs(newarray[0].astype(float)-date2num(endtime))).argmin() # get the nearest index to endtime and add 1 (to get lengths correctly)
#idx = 1+ (np.abs(self.ndarray[0]-date2num(endtime))).argmin() # get the nearest index to endtime
if idx >= len(newarray[0]): ## prevent too large idx values
idx = len(newarray[0]) - 1
while True:
if not float(newarray[0][idx]) < date2num(endtime) and idx != 0: # Make sure that last value is smaller than endtime
idx -= 1
else:
break
#self.ndarray = list(self.ndarray)
for i in range(len(newarray)):
length = len(newarray[i])
if length >= idx:
newarray[i] = newarray[i][:idx+1]
newarray = np.asarray(newarray,dtype=object)
#-ndarrray---------------------------------------
#--------------------------------------------------
if newway and not ndtype:
# Non-destructive trimming of stream
trimmedstream = DataStream()
trimmedstream.header = self.header
starttime = self._testtime(starttime)
endtime = self._testtime(endtime)
stval = 0
for idx, elem in enumerate(self):
newline = LineStruct()
if not isnan(elem.time):
if elem.time >= date2num(starttime) and elem.time < date2num(endtime):
newline.time = elem.time
for key in KEYLIST:
exec('newline.'+key+' = elem.'+key)
trimmedstream.add(newline)
return trimmedstream
#--------------------------------------------------
if not ndtype:
stream = DataStream()
if starttime:
# check starttime input
starttime = self._testtime(starttime)
stval = 0
for idx, elem in enumerate(self):
if not isnan(elem.time):
if num2date(elem.time).replace(tzinfo=None) > starttime.replace(tzinfo=None):
#stval = idx-1 # changed because of latex output
stval = idx
break
if stval < 0:
stval = 0
self.container = self.container[stval:]
# remove data prior to endtime input
if endtime:
# check endtime input
endtime = self._testtime(endtime)
edval = len(self)
for idx, elem in enumerate(self):
if not isnan(elem.time):
if num2date(elem.time).replace(tzinfo=None) > endtime.replace(tzinfo=None):
edval = idx
#edval = idx-1
break
self.container = self.container[:edval]
if ndtype:
return DataStream(self.container,self.header,newarray)
else:
return DataStream(self.container,self.header,self.ndarray)
def use_sectime(self, swap=False):
"""
DEFINITION:
Drop primary time stamp and replace by secondary time stamp if available.
If swap is True, then primary time stamp is moved to secondary column (and
not dropped).
"""
if not 'sectime' in self._get_key_headers():
logger.warning("use_sectime: did not find secondary time column in the streams keylist - returning unmodified timeseries")
return self
# Non destructive
stream = self.copy()
pos = KEYLIST.index('sectime')
tcol = stream.ndarray[0]
stream = stream._move_column('sectime','time')
if swap:
stream = stream._put_column(tcol,'sectime')
else:
stream = stream._drop_column('sectime')
return stream
def variometercorrection(self, variopath, thedate, **kwargs):
"""
DEFINITION:
##### THIS METHOD IS USELESS....
##### Either select a certain time in absolute calculation (TODO)
##### or calculate daily means of basevalues which are already corrected for
##### variation --- leon 2016-03
Function to perform a variometercorrection of an absresult stream
towards the given datetime using the given variometer stream.
Returns a new absresult object with new datetime and corrected values
APPLICATION:
Useful to compare various absolute measurements, e.g. from one day, and analyse their
differences after correcting them to a single point in time.
PARAMETERS:
Variables:
- variopath: (str) path to the variometer data used for reduction
- thedate: (datetime/str) date and/or time of day towards which the data are corrected
Kwargs:
- funckeys: (list) keys of the variometerfile which are interpolated and used
- nomagorient: (bool) indicates that variometerdata is NOT in magnetic
coordinates (hez) - Method will then use header info
in DataRotationAlpha and Beta
RETURNS:
- stream: (DataStream object) absolute stream - corrected
EXAMPLE:
>>> newabsdata = absdata.variometercorrection(starttime, endtime)
APPLICATION:
"""
funckeys = kwargs.get('funckeys')
offset = kwargs.get('offset')
nomagorient = kwargs.get('nomagorient')
if not offset:
offset = 0.0
dateform = "%Y-%m-%d"
def getfuncvals(variofunc,day):
# Put the following to a function
functime = (date2num(day)-variofunc[1])/(variofunc[2]-variofunc[1])
#print(functime, day, date2num(day),variofunc[1],variofunc[2])
refval = []
for key in funckeys:
if key in ['x','y','z']:
refval.append(variofunc[0]['f'+key](functime))
return refval
# Return results within a new streamobject containing only
# the average values and its uncertainties
resultstream = DataStream()
# Check for ndtype:
ndtype = False
if len(self.ndarray[0]) > 0:
timecol = self.ndarray[0]
ndtype = True
typus = self.header.get('DataComponents')
try:
typus = typus.lower()[:3]
except:
typus = ''
else:
timecol = self._get_column('time')
try:
typus = self[0].typ[:3]
except:
typus = ''
# 1 Convert absresult - idff to xyz ---- NOT NECESSARY
# test stream type (xyz, idf or hdz?)
# TODO add the end check whether streams are modified!!!!!!!!!!
#print("Variometercorrection", typus)
absstream = self.copy()
absstream = absstream.removeduplicates()
# 2 Convert datetime to number
# check whether thedate is a time (then use this time every day)
# or a full date
datelist = []
try:
# Check whether provided thedate is a date with time
datelist = [self._testtime(thedate)]
print("Variometercorrection: using correction to single provided datetime", datelist[0])
except:
try:
# Check whether provided thedate is only time
tmpdatelst = [datetime.date(num2date(elem)) for elem in timecol]
tmpdatelst = list(set(tmpdatelst))
dummydatedt = self._testtime('2016-11-22T'+thedate)
datelist = [datetime.combine(elem, datetime.time(dummydatedt)) for elem in tmpdatelst]
except:
print("Variometercorrection: Could not interpret the provided date/time - aborting - used dateformat should be either 12:00:00 or 2016-11-22 12:00:00 - provided:", thedate)
return self
if len(datelist) == 1:
print("Variometercorrection: Transforming all provided absolute data towards", datelist[0])
elif len(datelist) > 1:
print("Variometercorrection: Correcting all absolute data of individual days towards time", datetime.strftime(datelist[0],"%H:%M:%S"))
else:
print("Variometercorrection: No correction date found - aborting")
return self
for day in datelist:
print("Variocorrection: dealing with {}".format(day))
# 1. Select the appropriate values from self
if len(datelist) == 1:
usedabsdata = absstream
st, et = absstream._find_t_limits()
else:
st = str(datetime.date(day))
et = str(datetime.date(day+timedelta(days=1)))
usedndarray = absstream._select_timerange(starttime=st, endtime=et)
usedabsdata = DataStream([LineStruct()],self.header,usedndarray)
#print(date, num2date(usedabsdata.ndarray[0]))
# 2. Read variation data for respective date
vario = read(variopath, starttime=st, endtime=et)
print("Variocorrection: loaded {} data points".format(vario.length()[0]))
#print("Variocorrection: Please note - we are assuming that the provided variometerdata records the field in magnetic coordinates in nT (e.g. HEZ). In case of geographic xyz records one can activate a kwarg: takes provided rotation angle or (if not existing) the declination value of abs data")
# 3. Check DataComponents: we need pure variation data
comps = vario.header.get('DataComponents')
try:
comps = comps.lower()[:3]
except:
comps = ''
if comps in ['xyz','idf','hdz']:
# Data is already in geographic coordinates
# Rotate back
if not comps == 'xyz':
vario = vario._convertstream(comps+'2xyz')
nomagorient = True
else:
nomagorient = False
# 4. TODO TEST! Eventually rotate the data to hez
if nomagorient:
rotaangle = vario.header.get('DataRotationAlpha')
rotbangle = vario.header.get('DataRotationBeta')
#print("Angles", rotaangle, rotbangle)
try:
rotaangle = float(rotaangle)
rotbangle = float(rotbangle)
except:
pass
if rotaangle in [None,np.nan,0.0]:
print("Variocorrection: Did not find DataRotationAlpha in header assuming xyz and rotation by minus declination")
rotaangle = -np.mean(usedabsdata.ndarray[2])
else:
try:
rotaangle = float(rotaangle)
except:
rotaangle = 0.
if not rotbangle in [None,'Null',np.nan,0.0]:
try:
rotbangle = float(rotbangle)
except:
rotbangle = 0.
print("Variocorrection: Rotating data by {a} and {b}".format(a=rotaangle,b=rotbangle))
vario = vario.rotation(alpha=rotaangle,beta=rotbangle)
if vario.length()[0] > 1 and len(usedabsdata.ndarray[0]) > 0:
variost, varioet = vario._find_t_limits()
# 4. Interpolating variation data
if not funckeys:
funckeys = []
keys = vario._get_key_headers(numerical=True)
for key in keys:
if key in ['x','y','z','f']:
funckeys.append(key)
variofunc = vario.interpol(funckeys)
refvals = getfuncvals(variofunc,day)
for idx,abstime in enumerate(usedabsdata.ndarray[0]):
variovalsatabstime = getfuncvals(variofunc,num2date(abstime))
diffs= np.asarray(refvals)-np.asarray(variovalsatabstime)
"""
if key == 'y':
#refy = np.arctan2(np.asarray(list(ar)),np.asarray(list(arrayx)))*180./np.pi + function[0]['f'+key](functime)
pass
elif key in ['x','z']:
pass
else:
pass
#refvals = funcattime(variofunc,date)
# 5. Get variofunc data for selected date and each usedabsdata
#for abstime in usedabsdata.ndarray[0]:
# if variost
#absst, abset = usedabsdata._find_t_limits()
"""
"""
if key == 'y':
#indx = KEYLIST.index('x')
#Hv + Hb; Db + atan2(y,H_corr) Zb + Zv
#print type(self.ndarray[ind]), key, self.ndarray[ind]
array[ind] = np.arctan2(np.asarray(list(ar)),np.asarray(list(arrayx)))*180./np.pi + function[0]['f'+key](functimearray)
self.header['col-y'] = 'd'
self.header['unit-col-y'] = 'deg'
else:
print("func2stream", function, function[0], function[0]['f'+key],functimearray)
array[ind] = ar + function[0]['f'+key](functimearray)
if key == 'x': # remember this for correct y determination
arrayx = array[ind]
"""
"""
for date in datelist:
newvallists=[]
for elem in absstream:
# if elem.time == date:
# if value existis in function:
# calnewvalues and append to lists
# calc means from lists
# append means to new stream
# 4 Test whether variostream covers the timerange between the abstream value(s) and the datetime
if function[1] <= elem.time <= function[2] and function[1] <= newdate <= function[2]:
valatorgtime = (elem.time-function[1])/(function[2]-function[1])
valatnewtime = (newdate-function[1])/(function[2]-function[1])
elem.time = newdate
for key in funckeys:
if not key in KEYLIST[1:15]:
raise ValueError, "Column key not valid"
fkey = 'f'+key
if fkey in function[0]:
try:
orgval = float(function[0][fkey](valatorgtime))
newval = float(function[0][fkey](valatnewtime))
diff = orgval - newval
except:
logger.error("variometercorrection: error in assigning new values")
return
exec('elem.'+key+' = elem.'+key+' - diff')
else:
pass
else:
logger.warning("variometercorrection: Variometer stream does not cover the projected time range")
pass
# 5 Convert absresult - xyzf to idff
absstream = absstream._convertstream('xyz2idf')
return absstream
"""
def _write_format(self, format_type, filenamebegins, filenameends, coverage, dateformat,year):
"""
DEFINITION:
Helper method to determine suggested write filenames.
Reads format_type and header info of self -> returns specifications
RETURNS:
filenamebegins
filenameends
coverage
dateformat
"""
# Preconfigure some fileformats - can be overwritten by keywords
if format_type == 'IMF':
dateformat = '%b%d%y'
try:
extension = (self.header.get('StationID','')).lower()
except:
extension = 'txt'
filenameends = '.'+extension
coverage = 'day'
if format_type == 'IAF':
try:
filenamebegins = (self.header.get('StationIAGAcode','')).upper()
except:
filenamebegins = 'XXX'
dateformat = '%y%b'
extension = 'BIN'
coverage = 'month'
filenameends = '.'+extension
if format_type == 'IYFV':
if not filenameends or filenameends=='.cdf':
head = self.header
code = head.get('StationIAGAcode','')
if not code == '':
filenameends = '.'+code.upper()
else:
filenameends = '.XXX'
if not filenamebegins:
filenamebegins = 'YEARMEAN'
dateformat = 'None'
coverage = 'year'
if format_type == 'IAGA':
dateformat = '%Y%m%d'
if not coverage == 'all':
coverage = 'day'
head = self.header
if not filenamebegins:
code = head.get('StationIAGAcode','')
if code == '':
code = head.get('StationID','')
if not code == '':
filenamebegins = code.lower()[:3]
if not filenameends or filenameends=='.cdf':
samprate = float(str(head.get('DataSamplingRate','0')).replace('sec','').strip())
plevel = head.get('DataPublicationLevel',0)
if int(samprate) == 1:
middle = 'sec'
elif int(samprate) == 60:
middle = 'min'
elif int(samprate) == 3600:
middle = 'hou'
else:
middle = 'lol'
if plevel == 4:
fed = 'd'+middle+'.'+middle
elif plevel == 3:
fed = 'q'+middle+'.'+middle
elif plevel == 2:
fed = 'p'+middle+'.'+middle
else:
fed = 'v'+middle+'.'+middle
filenameends = fed
if format_type == 'CSV':
if not filenameends:
filenameends = '.csv'
if format_type == 'IMAGCDF':
begin = (self.header.get('StationIAGAcode','')).lower()
if begin == '':
begin = (self.header.get('StationID','XYZ')).lower()
publevel = str(self.header.get('DataPublicationLevel',0))
samprate = float(str(self.header.get('DataSamplingRate','0')).replace('sec','').strip())
if coverage == 'year':
dfor = '%Y'
elif coverage == 'month':
dfor = '%Y%m'
else:
dfor = '%Y%m%d'
if int(samprate) == 1:
dateformat = dfor
middle = '_000000_PT1S_'
elif int(samprate) == 60:
dateformat = dfor
middle = '_0000_PT1M_'
elif int(samprate) == 3600:
dateformat = dfor
middle = '_00_PT1H_'
elif int(samprate) == 86400:
dateformat = dfor
middle = '_PT1D_'
elif int(samprate) > 30000000:
dateformat = '%Y'
middle = '_PT1Y_'
elif int(samprate) > 2400000:
dateformat = '%Y%m'
middle = '_PT1M_'
else:
dateformat = '%Y%m%d'
middle = 'unknown'
filenamebegins = begin+'_'
filenameends = middle+publevel+'.cdf'
if format_type == 'BLV':
if len(self.ndarray[0]) > 0:
lt = max(self.ndarray[0].astype(float))
else:
lt = self[-1].time
if year:
blvyear = str(year)
else:
blvyear = datetime.strftime(num2date(lt).replace(tzinfo=None),'%Y')
try:
filenamebegins = (self.header['StationID']).upper()+blvyear
except:
filenamebegins = 'XXX'+blvyear
filenameends = '.blv'
coverage = 'all'
if not format_type:
format_type = 'PYCDF'
if not dateformat:
dateformat = '%Y-%m-%d' # or %Y-%m-%dT%H or %Y-%m or %Y or %Y
if not coverage:
coverage = 'day' #timedelta(days=1)
if not filenamebegins:
filenamebegins = ''
if not filenameends and not filenameends == '':
# Extension for cdf files is automatically attached
if format_type in ['PYCDF','IMAGCDF']:
filenameends = ''
else:
filenameends = '.txt'
return format_type, filenamebegins, filenameends, coverage, dateformat
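# Illustrative sketch of what this helper returns (values derived from the rules
# above, not guaranteed): for an IAGA one-second stream with StationIAGAcode 'WIC'
# and DataPublicationLevel 4 one would expect
#   ('IAGA', 'wic', 'dsec.sec', 'day', '%Y%m%d')
# i.e. daily files of the form wicYYYYMMDDdsec.sec assembled in write() below.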
def write(self, filepath, compression=5, **kwargs):
"""
DEFINITION:
Code for simple application: write Stream to a file.
PARAMETERS:
Variables:
- filepath: (str) Providing path/filename for saving.
Kwargs:
- coverage: (str/timedelta) day files or hour or month or year or all - default day.
'month','year','all',etc., otherwise timedelta object
- dateformat: (str) outformat of date in filename (e.g. "%Y-%m-%d" -> "2011-11-22".
- filenamebegins: (str) providing the begin of savename (e.g. "WIK_").
- filenameends: (str) providing the end of savename (e.g. ".min").
- format_type: (str) Which format - default pystr.
Current supported formats: PYSTR, PYCDF, IAGA, WDC, DIDD,
PMAG1, PMAG2, DTU1, GDASA1, RMRCS, AUTODIF_FREAD,
USBLOG, CR800, LATEX
- keys: (list) Keys to write to file.
- mode: (str) Mode for handling existing files/data in files.
Options: append, overwrite, replace, skip
[- period: (str) Supports hour, day, month, year, all - default day.]
[--> Where is this?]
- wformat: (str) outputformat.
SPECIFIC FORMAT INSTRUCTIONS:
format_type='IAGA'
------------------
*General:
The meta information provided within the header of each IAGA file is automatically
generated from the header information provided along with the following keys
(defined by stream.header[key]):
- Obligatory: StationInstitution, StationName, StationIAGAcode (or StationID),
DataElevation, DataSensorOrientation, DataDigitalSampling
- Optional: SensorID, DataPublicationDate, DataComments, DataConversion, StationK9,
SecondarySensorID (F sensor), StationMeans (used for 'Approx H')
- Header input "IntervalType": can either be provided by using key 'DataIntervalType'
or is automatically created from DataSamplingRate.
Filter details as contained in DataSamplingFilter are added to the
commentary part
- Header input "Geodetic Longitude and Latitude":
- defined with keys 'DataAcquisitionLatitude','DataAcquisitionLongitude'
- if an EPSG code is provided in key 'DataLocationReference'
this code is used to convert Lat and Long into the WGS84 system
e.g. stream.header['DataLocationReference'] = 'M34, EPSG: '
*Specific parameters:
- useg (Bool) if F is available, and G not yet calculated: calculate G (deltaF) and
use it within the IAGA output file
*Example:
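(illustrative sketch, path is a placeholder)
>>> stream.write('/tmp/iaga', format_type='IAGA', useg=True)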
format_type='IMF'
------------------
*Specific parameters:
- version (str) file version
- gin (str) geomagnetic information node (GIN) code
- datatype (str) R: reported, A: adjusted, Q: quasi-definite, D: definite
- kvals (Datastream) contains K value for iaf storage
- comment (string) some comment, currently used in IYFV
- kind (string) one of 'A' (all), 'Q' quiet days, 'D' disturbed days,
currently used in IYFV
format_type='IMAGCDF'
------------------
*General:
- Header input "Geodetic Longitude and Latitude": see format_type='IAGA'
*Specific parameters:
- addflags (BOOL) add flags to IMAGCDF output if True
format_type='BLV'
------------------
*Specific parameters:
- absinfo (str) parameter of DataAbsInfo
- fitfunc (str) fit function for baselinefit
- fitdegree
- knotstep
- extradays
- year (int) year
- meanh (float) annual mean of H component
- meanf (float) annual mean of F component
- deltaF (float) given deltaF value between pier and f position
- diff (DataStream) diff (deltaF) between vario and scalar
RETURNS:
- success (bool) True if successful.
EXAMPLE:
>>> stream.write('/home/user/data',
format_type='IAGA')
>>> stringio = stream.write('StringIO',
format_type='IAGA')
APPLICATION:
"""
format_type = kwargs.get('format_type')
filenamebegins = kwargs.get('filenamebegins')
filenameends = kwargs.get('filenameends')
dateformat = kwargs.get('dateformat')
coverage = kwargs.get('coverage')
mode = kwargs.get('mode')
#period = kwargs.get('period') # TODO
#offsets = kwargs.get('offsets') # retired? TODO
keys = kwargs.get('keys')
absinfo = kwargs.get('absinfo')
fitfunc = kwargs.get('fitfunc')
fitdegree = kwargs.get('fitdegree')
knotstep = kwargs.get('knotstep')
extradays = kwargs.get('extradays')
year = kwargs.get('year')
meanh = kwargs.get('meanh')
meanf = kwargs.get('meanf')
deltaF = kwargs.get('deltaF')
diff = kwargs.get('diff')
baseparam = kwargs.get('baseparam')
version = kwargs.get('version')
gin = kwargs.get('gin')
datatype = kwargs.get('datatype')
kvals = kwargs.get('kvals')
kind = kwargs.get('kind')
comment = kwargs.get('comment')
useg = kwargs.get('useg')
skipcompression = kwargs.get('skipcompression')
debug = kwargs.get('debug')
addflags = kwargs.get('addflags')
headonly = kwargs.get('headonly')
success = True
#compression: provide compression factor for CDF data: 0 no compression, 9 high compression
t1 = datetime.utcnow()
if not format_type in PYMAG_SUPPORTED_FORMATS:
if not format_type:
format_type = 'PYSTR'
else:
logger.warning('write: Output format not supported.')
return False
else:
if not 'w' in PYMAG_SUPPORTED_FORMATS[format_type][0]:
logger.warning('write: Selected format does not support write methods.')
return False
format_type, filenamebegins, filenameends, coverage, dateformat = self._write_format(format_type, filenamebegins, filenameends, coverage, dateformat, year)
if not mode:
mode= 'overwrite'
if len(self) < 1 and len(self.ndarray[0]) < 1:
logger.error('write: Stream is empty!')
raise Exception("Can't write an empty stream to file!")
ndtype = False
if len(self.ndarray[0]) > 0:
self.ndarray[0] = self.ndarray[0].astype(float)
# remove all data from array where time is not numeric
#1. get indicies of nonnumerics in ndarray[0]
nonnumlist = np.asarray([idx for idx,elem in enumerate(self.ndarray[0]) if np.isnan(elem)])
#2. delete them
if len(nonnumlist) > 0:
print("write: Found NaNs in time column - deleting them", nonnumlist)
print(self.ndarray[0])
for idx, elem in enumerate(self.ndarray):
self.ndarray[idx] = np.delete(self.ndarray[idx],nonnumlist)
starttime = datetime.strptime(datetime.strftime(num2date(float(self.ndarray[0][0])).replace(tzinfo=None),'%Y-%m-%d'),'%Y-%m-%d')
try:
lasttime = num2date(float(self.ndarray[0][-1])).replace(tzinfo=None)
except:
lasttime = num2date(float(self.ndarray[0][-2])).replace(tzinfo=None)
ndtype = True
else:
starttime = datetime.strptime(datetime.strftime(num2date(self[0].time).replace(tzinfo=None),'%Y-%m-%d'),'%Y-%m-%d')
lasttime = num2date(self[-1].time).replace(tzinfo=None)
t2 = datetime.utcnow()
# divide stream in parts according to coverage and save them
newst = DataStream()
if coverage == 'month':
#starttime = datetime.strptime(datetime.strftime(num2date(self[0].time).replace(tzinfo=None),'%Y-%m-%d'),'%Y-%m-%d')
cmonth = int(datetime.strftime(starttime,'%m')) + 1
cyear = int(datetime.strftime(starttime,'%Y'))
if cmonth == 13:
cmonth = 1
cyear = cyear + 1
monthstr = str(cyear) + '-' + str(cmonth) + '-' + '1T00:00:00'
endtime = datetime.strptime(monthstr,'%Y-%m-%dT%H:%M:%S')
while starttime < lasttime:
if ndtype:
lst = []
ndarray=self._select_timerange(starttime=starttime, endtime=endtime)
else:
lst = [elem for elem in self if starttime <= num2date(elem.time).replace(tzinfo=None) < endtime]
ndarray = np.asarray([])
newst = DataStream(lst,self.header,ndarray)
filename = filenamebegins + datetime.strftime(starttime,dateformat) + filenameends
# remove any possibly existing null byte
filename = filename.replace('\x00','')
if len(lst) > 0 or len(ndarray[0]) > 0:
success = writeFormat(newst, os.path.join(filepath,filename),format_type,mode=mode,keys=keys,kvals=kvals,skipcompression=skipcompression,compression=compression, addflags=addflags)
starttime = endtime
# get next endtime
cmonth = int(datetime.strftime(starttime,'%m')) + 1
cyear = int(datetime.strftime(starttime,'%Y'))
if cmonth == 13:
cmonth = 1
cyear = cyear + 1
monthstr = str(cyear) + '-' + str(cmonth) + '-' + '1T00:00:00'
endtime = datetime.strptime(monthstr,'%Y-%m-%dT%H:%M:%S')
elif coverage == 'year':
#print ("write: Saving yearly data")
cyear = int(datetime.strftime(starttime,'%Y'))
cyear = cyear + 1
yearstr = str(cyear) + '-01-01T00:00:00'
endtime = datetime.strptime(yearstr,'%Y-%m-%dT%H:%M:%S')
while starttime < lasttime:
ndarray=self._select_timerange(starttime=starttime, endtime=endtime)
newst = DataStream([LineStruct()],self.header,ndarray)
if not dateformat == 'None':
dat = datetime.strftime(starttime,dateformat)
else:
dat = ''
filename = filenamebegins + dat + filenameends
# remove any possibly existing null byte
filename = filename.replace('\x00','')
if len(ndarray[0]) > 0:
success = writeFormat(newst, os.path.join(filepath,filename),format_type,mode=mode,keys=keys,kvals=kvals,kind=kind,comment=comment,skipcompression=skipcompression,compression=compression, addflags=addflags)
# get next endtime
starttime = endtime
cyear = cyear + 1
yearstr = str(cyear) + '-01-01T00:00:00'
endtime = datetime.strptime(yearstr,'%Y-%m-%dT%H:%M:%S')
elif not coverage == 'all':
#starttime = datetime.strptime(datetime.strftime(num2date(self[0].time).replace(tzinfo=None),'%Y-%m-%d'),'%Y-%m-%d')
if coverage == 'hour':
cov = timedelta(hours=1)
else:
cov = timedelta(days=1)
dailystream = self.copy()
maxidx = -1
endtime = starttime + cov
while starttime < lasttime:
#lst = [elem for elem in self if starttime <= num2date(elem.time).replace(tzinfo=None) < endtime]
#newst = DataStream(lst,self.header)
t3 = datetime.utcnow()
#print "write - writing day:", t3
if ndtype:
lst = []
# non-destructive
#print "write: start and end", starttime, endtime
#print "write", dailystream.length()
#ndarray=self._select_timerange(starttime=starttime, endtime=endtime)
#print starttime, endtime, coverage
#print "Maxidx", maxidx
ndarray=dailystream._select_timerange(starttime=starttime, endtime=endtime, maxidx=maxidx)
#print "write", len(ndarray), len(ndarray[0])
if len(ndarray[0]) > 0:
#maxidx = len(ndarray[0])*2 ## That does not work for few seconds of first day and full coverage of all other days
dailystream.ndarray = np.asarray([array[(len(ndarray[0])-1):] for array in dailystream.ndarray])
#print dailystream.length()
#print len(ndarray), len(ndarray[0]), len(ndarray[1]), len(ndarray[3])
else:
lst = [elem for elem in self if starttime <= num2date(elem.time).replace(tzinfo=None) < endtime]
ndarray = np.asarray([np.asarray([]) for key in KEYLIST])
t4 = datetime.utcnow()
#print "write - selecting time range needs:", t4-t3
newst = DataStream(lst,self.header,ndarray)
filename = str(filenamebegins) + str(datetime.strftime(starttime,dateformat)) + str(filenameends)
# remove any possibly existing null byte
filename = filename.replace('\x00','')
if format_type == 'IMF':
filename = filename.upper()
if debug:
print ("Writing data:", os.path.join(filepath,filename))
if len(lst) > 0 or ndtype:
if len(newst.ndarray[0]) > 0 or len(newst) > 1:
logger.info('write: writing %s' % filename)
#print("Here", num2date(newst.ndarray[0][0]), newst.ndarray)
success = writeFormat(newst, os.path.join(filepath,filename),format_type,mode=mode,keys=keys,version=version,gin=gin,datatype=datatype, useg=useg,skipcompression=skipcompression,compression=compression, addflags=addflags,headonly=headonly,kind=kind)
starttime = endtime
endtime = endtime + cov
t5 = datetime.utcnow()
#print "write - written:", t5-t3
#print "write - End:", t5
else:
filename = filenamebegins + filenameends
# remove any possibly existing null byte
filename = filename.replace('\x00','')
if debug:
print ("Writing file:", filename)
success = writeFormat(self, os.path.join(filepath,filename),format_type,mode=mode,keys=keys,absinfo=absinfo,fitfunc=fitfunc,fitdegree=fitdegree, knotstep=knotstep,meanh=meanh,meanf=meanf,deltaF=deltaF,diff=diff,baseparam=baseparam, year=year,extradays=extradays,skipcompression=skipcompression,compression=compression, addflags=addflags,headonly=headonly,kind=kind)
return success
def idf2xyz(self,**kwargs):
"""
DEFINITION:
Converts inclination, declination, intensity (idf) data to xyz (i,d in 0.00000 deg (or gon)), f in nT
Working only for ndarrays
PARAMETERS:
optional keywords:
unit (string) can be deg or gon
"""
unit = kwargs.get('unit')
keys = kwargs.get('keys')
if not len(self.ndarray[0]) > 0:
print("idf2xyz: no data found")
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
print("idf2xyz: invalid keys provided")
indx = KEYLIST.index(keys[0])
indy = KEYLIST.index(keys[1])
indz = KEYLIST.index(keys[2])
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
dc = self.ndarray[indy].astype(float)*np.pi/(180.*ang_fac)
ic = self.ndarray[indx].astype(float)*np.pi/(180.*ang_fac)
self.ndarray[indx] = self.ndarray[indz].astype(float)*np.cos(dc)*np.cos(ic)
self.ndarray[indy] = self.ndarray[indz].astype(float)*np.sin(dc)*np.cos(ic)
self.ndarray[indz] = self.ndarray[indz].astype(float)*np.sin(ic)
self.header['col-x'] = 'X'
self.header['col-y'] = 'Y'
self.header['col-z'] = 'Z'
self.header['unit-col-x'] = 'nT'
self.header['unit-col-y'] = 'nT'
self.header['unit-col-z'] = 'nT'
self.header['DataComponents'] = self.header['DataComponents'].replace('IDF','XYZ')
return self
def xyz2idf(self,**kwargs):
"""
DEFINITION:
Converts x,y,z (all in nT) to inclination, declination, intensity (idf)
(i,d in 0.00000 deg (or gon)), f in nT
Working only for ndarrays
PARAMETERS:
optional keywords:
unit (string) can be deg or gon
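EXAMPLE (illustrative sketch, assumes the stream holds x,y,z in nT):
>>> idfstream = xyzstream.xyz2idf()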
"""
keys = kwargs.get('keys')
if not len(self.ndarray[0]) > 0:
print("xyz2idf: no data found")
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
print("xyz2idf: invalid keys provided")
indx = KEYLIST.index(keys[0])
indy = KEYLIST.index(keys[1])
indz = KEYLIST.index(keys[2])
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
h = np.sqrt(self.ndarray[indx].astype(float)**2 + self.ndarray[indy].astype(float)**2)
i = (180.*ang_fac)/np.pi * np.arctan2(self.ndarray[indz].astype(float), h)
d = (180.*ang_fac)/np.pi * np.arctan2(self.ndarray[indy].astype(float), self.ndarray[indx].astype(float))
f = np.sqrt(self.ndarray[indx].astype(float)**2+self.ndarray[indy].astype(float)**2+self.ndarray[indz].astype(float)**2)
self.ndarray[indx] = i
self.ndarray[indy] = d
self.ndarray[indz] = f
self.header['col-x'] = 'I'
self.header['col-y'] = 'D'
self.header['col-z'] = 'F'
self.header['unit-col-x'] = 'deg'
self.header['unit-col-y'] = 'deg'
self.header['unit-col-z'] = 'nT'
self.header['DataComponents'] = self.header['DataComponents'].replace('XYZ','IDF')
return self
def xyz2hdz(self,**kwargs):
"""
DEFINITION:
Converts x,y,z (all in nT) to horizontal, declination, z (hdz)
(d in 0.00000 deg (or gon)), h,z in nT
Working only for ndarrays
PARAMETERS:
optional keywords:
unit (string) can be deg or gon
"""
keys = kwargs.get('keys')
if not len(self.ndarray[0]) > 0:
print("xyz2hdz: no data found")
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
print("xyz2hdz: invalid keys provided")
indx = KEYLIST.index(keys[0])
indy = KEYLIST.index(keys[1])
indz = KEYLIST.index(keys[2])
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
h = np.sqrt(self.ndarray[indx].astype(float)**2 + self.ndarray[indy].astype(float)**2)
d = (180.*ang_fac) / np.pi * np.arctan2(self.ndarray[indy].astype(float), self.ndarray[indx].astype(float))
self.ndarray[indx] = h
self.ndarray[indy] = d
#dH = dX*X/sqrt(X^2 + Y^2) + dY*Y/sqrt(X^2 + Y^2)
#dD = 180/Pi*(dY*X/(X^2 + Y^2) - dX*Y/(X^2 + Y^2))
self.header['col-x'] = 'H'
self.header['col-y'] = 'D'
self.header['unit-col-x'] = 'nT'
self.header['unit-col-y'] = 'deg'
self.header['DataComponents'] = self.header['DataComponents'].replace('XYZ','HDZ')
return self
def hdz2xyz(self,**kwargs):
"""
DEFINITION:
Converts h,d,z (h,z in nT, d in deg) to xyz
Working only for ndarrays
PARAMETERS:
optional keywords:
unit (string) can be deg or gon
keys (list) list of three keys which hold h,d,z values
"""
keys = kwargs.get('keys')
if not len(self.ndarray[0]) > 0:
print("hdz2xyz: no data found")
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
print("hdz2xyz: invalid keys provided")
indx = KEYLIST.index(keys[0])
indy = KEYLIST.index(keys[1])
indz = KEYLIST.index(keys[2])
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
dc = self.ndarray[indy].astype(float)*np.pi/(180.*ang_fac)
prevxcol = self.ndarray[indx].astype(float)
self.ndarray[indx] = prevxcol * (np.cos(dc))
self.ndarray[indy] = prevxcol * (np.sin(dc))
#self.ndarray[indx] = self.ndarray[indx].astype(float) /np.sqrt((np.tan(dc))**2 + 1)
#self.ndarray[indy] = np.sqrt(self.ndarray[indx].astype(float)**2 - xtmp**2)
#print self.ndarray[indy]
#self.ndarray[indx] = xtmp
self.header['col-x'] = 'X'
self.header['col-y'] = 'Y'
self.header['col-z'] = 'Z'
self.header['unit-col-x'] = 'nT'
self.header['unit-col-y'] = 'nT'
self.header['unit-col-z'] = 'nT'
self.header['DataComponents'] = self.header['DataComponents'].replace('HDZ','XYZ')
return DataStream(self,self.header,self.ndarray)
class PyMagLog(object):
"""
Logging class for warning messages and analysis steps.
logger and warnings are lists of strings.
They contain full text information for file and screen output
"""
def __init__(self, logger=None, warnings=None, process=None, proc_count=0):
# avoid shared mutable default arguments
self.logger = logger if logger is not None else []
self.warnings = warnings if warnings is not None else []
self.process = process if process is not None else []
self.proc_count = proc_count
def __getitem__(self, key):
return getattr(self, key)
def addwarn(self, warnmsg):
self.warnings.append(warnmsg)
def addlog(self, logmsg):
self.logger.append(logmsg)
def addpro(self, promsg):
self.process.append(promsg)
def clearpro(self):
self.process = []
def clearlog(self):
self.logger = []
def clearwarn(self):
self.warnings = []
def addcount(self, num, maxnum):
"""
creates an integer number relative to maxnum ranging from 0 to 100
assuming num starting at zero
"""
self.proc_count = int(np.round(num*100/maxnum))
def clearcount(self):
self.proc_count = 0
def _removeduplicates(self,content):
return list(set(content))
"""
def sendLogByMail(self,loglist,**kwargs):
smtpserver = kwargs.get('smtpserver')
sender = kwargs.get('sender')
user = kwargs.get('user')
pwd = <PASSWORD>('<PASSWORD>')
destination = kwargs.get('destination')
subject = kwargs.get('subject')
if not smtpserver:
smtpserver = 'smtp.internet.at'
if not sender:
sender = '<EMAIL>'
if not destination:
destination = ['<EMAIL>']
if not user:
user = "FrauMusterfrau"
if not pwd:
pwd = "<PASSWORD>"
if not subject:
subject= 'MagPy Log from %s' % datetime.utcnow()
# typical values for text_subtype are plain, html, xml
text_subtype = 'plain'
content = '\n'.join(''.join(line) for line in loglist)
try:
msg = MIMEText(content, text_subtype)
msg['Subject']= subject
msg['From'] = sender # some SMTP servers will do this automatically, not all
smtp = SMTP()
smtp.set_debuglevel(False)
smtp.connect(smtpserver, 587)
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(user, pwd)
try:
smtp.sendmail(sender, destination, msg.as_string())
finally:
smtp.close()
except Exception as exc:
raise ValueError( "mail failed; %s" % str(exc) ) # give a error message
"""
def combineWarnLog(self,warning,log):
comlst = ['Warning:']
comlst.extend(self._removeduplicates(warning))
comlst.extend(['Non-critical info:'])
comlst.extend(self._removeduplicates(log))
return comlst
class LineStruct(object):
def __init__(self, time=float('nan'), x=float('nan'), y=float('nan'), z=float('nan'), f=float('nan'), dx=float('nan'), dy=float('nan'), dz=float('nan'), df=float('nan'), t1=float('nan'), t2=float('nan'), var1=float('nan'), var2=float('nan'), var3=float('nan'), var4=float('nan'), var5=float('nan'), str1='-', str2='-', str3='-', str4='-', flag='0000000000000000-', comment='-', typ="xyzf", sectime=float('nan')):
#def __init__(self):
#- at the end of flag is important to be recognized as string
"""
self.time=float('nan')
self.x=float('nan')
self.y=float('nan')
self.z=float('nan')
self.f=float('nan')
self.dx=float('nan')
self.dy=float('nan')
self.dz=float('nan')
self.df=float('nan')
self.t1=float('nan')
self.t2=float('nan')
self.var1=float('nan')
self.var2=float('nan')
self.var3=float('nan')
self.var4=float('nan')
self.var5=float('nan')
self.str1=''
self.str2=''
self.str3=''
self.str4=''
self.flag='0000000000000000-'
self.comment='-'
self.typ="xyzf"
self.sectime=float('nan')
"""
self.time = time
self.x = x
self.y = y
self.z = z
self.f = f
self.dx = dx
self.dy = dy
self.dz = dz
self.df = df
self.t1 = t1
self.t2 = t2
self.var1 = var1
self.var2 = var2
self.var3 = var3
self.var4 = var4
self.var5 = var5
self.str1 = str1
self.str2 = str2
self.str3 = str3
self.str4 = str4
self.flag = flag
self.comment = comment
self.typ = typ
self.sectime = sectime
def __repr__(self):
return repr((self.time, self.x, self.y, self.z, self.f, self.dx, self.dy, self.dz, self.df, self.t1, self.t2, self.var1, self.var2, self.var3, self.var4, self.var5, self.str1, self.str2, self.str3, self.str4, self.flag, self.comment, self.typ))
def __getitem__(self, index):
key = KEYLIST[index]
return getattr(self, key)
def __setitem__(self, index, value):
key = KEYLIST[index]
setattr(self, key.lower(), value)
def idf2xyz(self,**kwargs):
"""
keyword:
unit: (string) can be deg or gon
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
dc = self.y*np.pi/(180.*ang_fac)
ic = self.x*np.pi/(180.*ang_fac)
self.x = self.z*np.cos(dc)*np.cos(ic)
self.y = self.z*np.sin(dc)*np.cos(ic)
self.z = self.z*np.sin(ic)
return self
def xyz2idf(self,**kwargs):
"""
keyword:
unit: (string) can be deg or gon
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
h = np.sqrt(self.x**2 + self.y**2)
i = (180.*ang_fac)/np.pi * math.atan2(self.z, h)
d = (180.*ang_fac)/np.pi * math.atan2(self.y, self.x)
f = np.sqrt(self.x**2+self.y**2+self.z**2)
self.x = i
self.y = d
self.z = f
return self
def xyz2hdz(self,**kwargs):
"""
keyword:
unit: (string) can be deg or gon
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
h = np.sqrt(self.x**2 + self.y**2)
d = (180.*ang_fac) / np.pi * math.atan2(self.y, self.x)
self.x = h
self.y = d
#dH = dX*X/sqrt(X^2 + Y^2) + dY*Y/sqrt(X^2 + Y^2)
#dD = 180/Pi*(dY*X/(X^2 + Y^2) - dX*Y/(X^2 + Y^2))
return self
def hdz2xyz(self,**kwargs):
"""
keyword:
unit: (string) can be deg or gon
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
dc = self.y*np.pi/(180.*ang_fac)
xtmp = self.x /np.sqrt((np.tan(dc))**2 + 1)
self.y = np.sqrt(self.x**2 - xtmp**2)
self.x = xtmp
return self
def rotation(self,alpha=None,beta=None,**kwargs):
"""
Rotation matrix for rotating x,y,z to a new coordinate system xs,ys,zs using angles alpha and beta
alpha is the horizontal rotation in degree, beta the vertical
"""
unit = kwargs.get('unit')
if unit == 'gon':
ang_fac = 400./360.
elif unit == 'rad':
ang_fac = np.pi/180.
else:
ang_fac = 1.
ra = np.pi*alpha/(180.*ang_fac)
rb = np.pi*beta/(180.*ang_fac)
xs = self.x*np.cos(rb)*np.cos(ra)-self.y*np.sin(ra)+self.z*np.sin(rb)*np.cos(ra)
ys = self.x*np.cos(rb)*np.sin(ra)+self.y*np.cos(ra)+self.z*np.sin(rb)*np.sin(ra)
zs = self.x*np.sin(rb)+self.z*np.cos(rb)
self.x = xs
self.y = ys
self.z = zs
return self
# Unused classes
"""
class ColStruct(object):
def __init__(self,length, time=float('nan'), x=float('nan'), y=float('nan'), z=float('nan'), f=float('nan'), dx=float('nan'), dy=float('nan'), dz=float('nan'), df=float('nan'), t1=float('nan'), t2=float('nan'), var1=float('nan'), var2=float('nan'), var3=float('nan'), var4=float('nan'), var5=float('nan'), str1='-', str2='-', str3='-', str4='-', flag='0000000000000000-', comment='-', typ="xyzf", sectime=float('nan')):
#""
Not used so far. Maybe useful for
Speed optimization:
Change the whole thing to column operations
- at the end of flag is important to be recognized as string
for column initialization use a length parameter and "length*[float('nan')]" or "length*['-']" to initialize nan-values
#""
self.length = length
self.time = length*[time]
self.x = length*[x]
self.y = length*[y]
self.z = length*[z]
self.f = length*[f]
self.dx = length*[dx]
self.dy = length*[dy]
self.dz = length*[dz]
self.df = length*[df]
self.t1 = length*[t1]
self.t2 = length*[t2]
self.var1 = length*[var1]
self.var2 = length*[var2]
self.var3 = length*[var3]
self.var4 = length*[var4]
self.var5 = length*[var5]
self.str1 = length*[str1]
self.str2 = length*[str2]
self.str3 = length*[str3]
self.str4 = length*[str4]
self.flag = length*[flag]
self.comment = length*[comment]
self.typ = length*[typ]
self.sectime = length*[sectime]
def __repr__(self):
return repr((self.time, self.x, self.y, self.z, self.f, self.dx, self.dy, self.dz, self.df, self.t1, self.t2, self.var1, self.var2, self.var3, self.var4, self.var5, self.str1, self.str2, self.str3, self.str4, self.flag, self.comment, self.typ, self.sectime))
"""
# -------------------
# Global functions of the stream file
# -------------------
def coordinatetransform(u,v,w,kind):
"""
DESCRIPTION:
Transforms the given component triple and returns [d,i,h,x,y,z,f]; a list of zeros is returned for unsupported kinds.
Parameter "kind" defines the type of the provided values ('xyz', 'hdz', 'dhz' or 'idf')
APPLICATION:
trans = coordinatetransform(meanx,meany,meanz,'xyz')
"""
if not kind in ['xyz','hdz','dhz','idf']:
return [0]*7
if kind == 'xyz':
h = np.sqrt(u**2 + v**2)
i = (180.)/np.pi * np.arctan2(w, h)
d = (180.)/np.pi * np.arctan2(v, u)
f = np.sqrt(u**2+v**2+w**2)
return [d,i,h,u,v,w,f]
elif kind == 'hdz':
dc = v*np.pi/(180.)
xtmp = u /np.sqrt((np.tan(dc))**2 + 1)
y = np.sqrt(u**2 - xtmp**2)
x = xtmp
f = np.sqrt(x**2+y**2+w**2)
i = (180.)/np.pi * np.arctan2(w, u)
return [v,i,u,x,y,w,f]
elif kind == 'dhz':
dc = u*np.pi/(180.)
xtmp = v /np.sqrt((np.tan(dc))**2 + 1)
y = np.sqrt(v**2 - xtmp**2)
x = xtmp
f = np.sqrt(v**2+w**2) # v holds the horizontal intensity in the 'dhz' case
i = (180.)/np.pi * np.arctan2(w, v)
return [u,i,v,x,y,w,f]
return [0]*7
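# Illustrative sketch for coordinatetransform (values are placeholders): for
# kind='hdz' the input order is (h, d, z) and the full component list is returned.
# >>> d, i, h, x, y, z, f = coordinatetransform(21000.0, 4.2, 43000.0, 'hdz')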
def isNumber(s):
"""
Test whether s is a number
"""
try:
float(s)
return True
except ValueError:
return False
def find_nearest(array,value):
"""
Find the nearest element within an array
"""
# Eventually faster solution (minimal)
#idx = np.searchsorted(array, value, side="left")
#if math.fabs(value - array[idx-1]) < math.fabs(value - array[idx]):
# return array[idx-1], idx-1
#else:
# return array[idx], idx
idx = (np.abs(array-value)).argmin()
return array[idx], idx
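# Illustrative sketch:
# >>> find_nearest(np.array([1.0, 2.0, 4.0]), 2.7)
# (2.0, 1)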
def ceil_dt(dt,seconds):
"""
DESCRIPTION:
Function to round time to the next time step as given by its seconds
minute: 60 sec
quarter hour: 900 sec
hour: 3600 sec
PARAMETER:
dt: (datetime object)
seconds: (integer)
USAGE:
>>>print ceil_dt(datetime(2014,01,01,14,12,04),60)
>>>2014-01-01 14:13:00
>>>print ceil_dt(datetime(2014,01,01,14,12,04),3600)
>>>2014-01-01 15:00:00
>>>print ceil_dt(datetime(2014,01,01,14,7,0),60)
>>>2014-01-01 14:07:00
"""
#how many secs have passed this hour
nsecs = dt.minute*60+dt.second+dt.microsecond*1e-6
if nsecs % seconds:
delta = (nsecs//seconds)*seconds+seconds-nsecs
return dt + timedelta(seconds=delta)
else:
return dt
# ##################
# read/write functions
# ##################
def read(path_or_url=None, dataformat=None, headonly=False, **kwargs):
"""
DEFINITION:
The read functions tries to open the selected files. Calls on
function _read() for help.
PARAMETERS:
Variables:
- path_or_url: (str) Path to data files in form:
a) c:\my\data\*
b) c:\my\data\thefile.txt
c) /home/data/*
d) /home/data/thefile.txt
e) ftp://server/directory/
f) ftp://server/directory/thefile.txt
g) http://www.thepage.at/file.tab
- headonly: (bool) If True, read only header information (if supported by the format library).
Kwargs:
- dataformat: (str) Format of data file. Works as auto-detection.
- disableproxy: (bool) If True, will use urllib2.install_opener()
- endtime: (str/datetime object) Only data recorded before this time is read.
- starttime: (str/datetime object) Only data recorded after this time is read.
Format specific kwargs:
IAF:
- resolution: (str) can be either 'day','hour','minute'(default) or 'k'
RETURNS:
- stream: (DataStream object) Stream containing data in file
under path_or_url.
EXAMPLE:
>>> stream = read('/srv/archive/WIC/LEMI025/LEMI025_2014-05-05.bin')
OR
>>> stream = read('http://www.swpc.noaa.gov/ftpdir/lists/ace/20140507_ace_sis_5m.txt')
APPLICATION:
"""
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
debugmode = kwargs.get('debugmode')
disableproxy = kwargs.get('disableproxy')
skipsorting = kwargs.get('skipsorting')
keylist = kwargs.get('keylist') # for PYBIN
debug = kwargs.get('debug')
if disableproxy:
proxy_handler = ProxyHandler( {} )
opener = build_opener(proxy_handler)
# install this opener
install_opener(opener)
# 1. No path
if not path_or_url:
logger.error("read: File not specified.")
raise Exception("No path given for data in read function!")
# 2. Create DataStream
st = DataStream([],{},np.array([[] for ke in KEYLIST]))
# 3. Read data
if not isinstance(path_or_url, basestring):
# not a string - we assume a file-like object
pass
"""
elif path_or_url.startswith("DB:"):
# a database table
if
logger.error("read: File not specified.")
raise Exception("No path given for data in read function!")
pathname = path_or_url
for file in iglob(pathname):
stp = DataStream([],{},np.array([[] for ke in KEYLIST]))
stp = _read(file, dataformat, headonly, **kwargs)
"""
elif "://" in path_or_url:
# some URL
# extract extension if any
logger.info("read: Found URL to read at {}".format(path_or_url))
content = urlopen(path_or_url).read()
content = content.decode('utf-8')
if content.find('<pre>') > -1:
"""
check whether content is coming with some html tags
"""
def get_between(s,first,last):
start = s.index(first) + len(first)
end = s.index(last, start )
return s[start:end]
content_t = get_between(content, '<pre>', '</pre>')
cleanr = re.compile('<.*?>')
content = re.sub(cleanr, '', content_t)
#print ("HERE", path_or_url)
if debugmode:
print(urlopen(path_or_url).info())
if path_or_url[-1] == '/':
# directory
string = content # content was already decoded above
for line in string.split("\n"):
if len(line) > 1:
filename = (line.strip().split()[-1])
if debugmode:
print(filename)
content = urlopen(path_or_url+filename).read()
extension = os.path.basename(path_or_url).partition('.')[2]
suffix = '.'+extension if extension else '.tmp'
#date = os.path.basename(path_or_url).partition('.')[0][-8:]
#date = re.findall(r'\d+',os.path.basename(path_or_url).partition('.')[0])
date = os.path.basename(path_or_url).partition('.')[0] # append the full filename to the temporary file
fname = date+suffix
fname = fname.replace('?','').replace(':','') ## Necessary for windows
#fh = NamedTemporaryFile(suffix=date+suffix,delete=False)
fh = NamedTemporaryFile(suffix=fname,delete=False)
print (fh.name, suffix)
fh.write(content)
fh.close()
stp = _read(fh.name, dataformat, headonly, **kwargs)
if len(stp) > 0: # important - otherwise header is going to be deleted
st.extend(stp.container,stp.header,stp.ndarray)
os.remove(fh.name)
else:
# TODO !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# check whether content is a single file or e.g. a ftp-directory
# currently only single files are supported
# ToDo !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
extension = os.path.basename(path_or_url).partition('.')[2]
suffix = '.'+extension if extension else '.tmp'
#date = os.path.basename(path_or_url).partition('.')[0][-8:]
#date = re.findall(r'\d+',os.path.basename(path_or_url).partition('.')[0])[0]
date = os.path.basename(path_or_url).partition('.')[0] # append the full filename to the temporary file
fname = date+suffix
fname = fname.replace('?','').replace(':','') ## Necessary for windows
fh = NamedTemporaryFile(suffix=fname,delete=False,mode='w+')
fh.write(content)
fh.close()
st = _read(fh.name, dataformat, headonly, **kwargs)
os.remove(fh.name)
else:
# some file name
pathname = path_or_url
for filename in iglob(pathname):
getfile = True
theday = extractDateFromString(filename)
#print (" Extracted date:", theday) # Doesnt work for IAF files
try:
if starttime:
if not theday[-1] >= datetime.date(st._testtime(starttime)):
getfile = False
if endtime:
if not theday[0] <= datetime.date(st._testtime(endtime)):
getfile = False
except:
# Date format not recognised. Read all files
logger.info("read: Unable to detect date string in filename. Reading all files...")
#logger.warning("read: filename: {}, theday: {}".format(filename,theday))
getfile = True
if getfile:
if filename.endswith('.gz') or filename.endswith('.GZ'):
## Added gz support to read IMO compressed data directly - future option might include tarfiles
import gzip
print ("Found zipped file (gz) ... unpacking")
fname = os.path.split(filename)[1]
fname = fname[:-3] # remove the trailing '.gz'/'.GZ'
with NamedTemporaryFile(suffix=fname,delete=False) as fh:
shutil.copyfileobj(gzip.open(filename), fh)
filename = fh.name
if filename.endswith('.zip') or filename.endswith('.ZIP'):
## Added zip support to read IMO compressed data directly - future option might include tarfiles
from zipfile import ZipFile
print ("Found zipped file (zip) ... unpacking")
with ZipFile(filename) as myzip:
fname = myzip.namelist()[0]
with NamedTemporaryFile(suffix=fname,delete=False) as fh:
shutil.copyfileobj(myzip.open(fname), fh)
filename = fh.name
stp = DataStream([],{},np.array([[] for ke in KEYLIST]))
try:
stp = _read(filename, dataformat, headonly, **kwargs)
except:
stp = DataStream([],{},np.array([[] for ke in KEYLIST]))
logger.warning("read: File {} could not be read. Skipping ...".format(filename))
if (len(stp) > 0 and not np.isnan(stp[0].time)) or len(stp.ndarray[0]) > 0: # important - otherwise header is going to be deleted
st.extend(stp.container,stp.header,stp.ndarray)
#del stp
if st.length()[0] == 0:
# try to give more specific information why the stream is empty
if has_magic(pathname) and not glob(pathname):
logger.error("read: No file matching file pattern: %s" % pathname)
raise Exception("Cannot read non-existent file!")
elif not has_magic(pathname) and not os.path.isfile(pathname):
logger.error("read: No such file or directory: %s" % pathname)
raise Exception("Cannot read non-existent file!")
# Only raise error if no starttime/endtime has been set. This
# will return an empty stream if the user chose a time window with
# no data in it.
# XXX: Might cause problems if the data is faulty and the user
# set starttime/endtime. Not sure what to do in this case.
elif not 'starttime' in kwargs and not 'endtime' in kwargs:
logger.error("read: Cannot open file/files: %s" % pathname)
elif 'starttime' in kwargs or 'endtime' in kwargs:
logger.error("read: Cannot read data. Probably no data available in the time range provided!")
raise Exception("No data available in time range")
else:
logger.error("read: Unknown error occurred. No data in stream!")
raise Exception("Unknown error occurred during reading. No data in stream!")
if headonly and (starttime or endtime):
msg = "read: Keyword headonly cannot be combined with starttime or endtime."
logger.error(msg)
# Sort the input data regarding time
if not skipsorting:
st = st.sorting()
# eventually trim data
if starttime:
st = st.trim(starttime=starttime)
if endtime:
st = st.trim(endtime=endtime)
### Define some general header information TODO - This is done already in some format libs - clean up
st.header['DataSamplingRate'] = float("{0:.2f}".format(st.samplingrate()))
return st
#@uncompressFile
def _read(filename, dataformat=None, headonly=False, **kwargs):
"""
Reads a single file into a MagPy DataStream object.
Internal function only.
"""
debug = kwargs.get('debug')
stream = DataStream([],{})
format_type = None
foundappropriate = False
if not dataformat:
# auto detect format - go through all known formats in given sort order
for format_type in PYMAG_SUPPORTED_FORMATS:
# check format
if debug:
print("_read: Testing format: {} ...".format(format_type))
if debug:
logger.info("_read: Testing format: {} ...".format(format_type))
#try:
# readsucc = isFormat(filename, format_type)
#except:
# readsucc = False
if isFormat(filename, format_type):
if debug:
logger.info(" -- found: {}".format(format_type))
print (" -- found: {}".format(format_type))
foundappropriate = True
break
if not foundappropriate:
temp = open(filename, 'rt').readline()
if temp.startswith('# MagPy Absolutes'):
logger.warning("_read: You apparently tried to open a DI object - please use the absoluteAnalysis method")
else:
logger.error("_read: Could not identify a suitable data format")
return DataStream([LineStruct()],{},np.asarray([[] for el in KEYLIST]))
else:
# format given via argument
dataformat = dataformat.upper()
try:
formats = [el for el in PYMAG_SUPPORTED_FORMATS if el == dataformat]
format_type = formats[0]
except IndexError:
msg = "Format \"%s\" is not supported. Supported types: %s"
logger.error(msg % (dataformat, ', '.join(PYMAG_SUPPORTED_FORMATS)))
raise TypeError(msg % (dataformat, ', '.join(PYMAG_SUPPORTED_FORMATS)))
"""
try:
# search readFormat for given entry point
readFormat = load_entry_point(format_ep.dist.key,
'obspy.plugin.waveform.%s' % (format_ep.name), 'readFormat')
except ImportError:
msg = "Format \"%s\" is not supported. Supported types: %s"
raise TypeError(msg % (format_ep.name,
', '.join(WAVEFORM_ENTRY_POINTS)))
"""
stream = readFormat(filename, format_type, headonly=headonly, **kwargs)
return stream
def saveflags(mylist=None,path=None, overwrite=False):
"""
DEFINITION:
Save a list, e.g. a flaglist, to file using pickle (or json if the path ends with '.json').
PARAMETERS:
Variables:
- path: (str) Path to data files in form:
RETURNS:
- True if successful otherwise False
EXAMPLE:
>>> saveflags(flaglist,'/my/path/myfile.pkl')
"""
print("Saving flaglist ...")
if not mylist:
print("error 1")
return False
if not path:
path = 'myfile.pkl'
if not overwrite:
existflag = loadflags(path)
existflag.extend(mylist)
mylist = existflag
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if path.endswith('.json'):
print(" -- using json format ")
try:
import json
def dateconv(d):
# Converter to serialize datetime objects in json
if isinstance(d,datetime):
return d.__str__()
# Convert mylist to a dictionary
mydic = {}
# get a list of unique sensorid
sid = [elem[5] for elem in mylist]
sid = list(set(sid))
for s in sid:
slist = [elem[0:5]+elem[6:] for elem in mylist if elem[5] == s]
mydic[s] = slist
## Dictionary looks like {SensorID:[[t1,t2,xxx,xxx,],[x...]]}
with open(path,'w',encoding='utf-8') as file:
file.write(unicode(json.dumps(mydic,default=dateconv)))
print("saveflags: list saved to a json file: {}".format(path))
return True
except:
return False
else:
print(" -- using pickle")
try:
# TODO: check whether package is already loaded
from pickle import dump
dump(mylist,open(path,'wb'))
print("saveflags: list saved to {}".format(path))
return True
except:
return False
def loadflags(path=None,sensorid=None,begin=None, end=None):
"""
DEFINITION:
Load a list, e.g. a flaglist, from file using pickle (or json if the path ends with '.json').
PARAMETERS:
Variables:
- path: (str) Path to data files in form:
- begin: (datetime)
- end: (datetime)
RETURNS:
- list (e.g. flaglist)
EXAMPLE:
>>> loadflags('/my/path/myfile.pkl')
"""
if not path:
return []
if path.endswith('.json'):
try:
import json
print ("Reading a json style flaglist...")
def dateparser(dct):
# Convert dates in dictionary to datetime objects
for (key,value) in dct.items():
for i,line in enumerate(value):
for j,elem in enumerate(line):
if str(elem).count('-') + str(elem).count(':') == 4:
try:
try:
value[i][j] = datetime.strptime(elem,"%Y-%m-%d %H:%M:%S.%f")
except:
value[i][j] = datetime.strptime(elem,"%Y-%m-%d %H:%M:%S")
except:
pass
dct[key] = value
return dct
if os.path.isfile(path):
with open(path,'r') as file:
mydic = json.load(file,object_hook=dateparser)
if sensorid:
mylist = mydic.get(sensorid,'')
do = [el.insert(5,sensorid) for el in mylist]
else:
mylist = []
for s in mydic:
ml = mydic[s]
do = [el.insert(5,s) for el in ml]
mylist.extend(mydic[s])
if begin:
mylist = [el for el in mylist if el[1] > begin]
if end:
mylist = [el for el in mylist if el[0] < end]
return mylist
else:
print ("Flagfile not yet existing ...")
return []
except:
return []
else:
try:
from pickle import load as pklload
mylist = pklload(open(path,"rb"))
print("loadflags: list {a} successfully loaded, found {b} inputs".format(a=path,b=len(mylist)))
if sensorid:
print(" - extracting data for sensor {}".format(sensorid))
mylist = [el for el in mylist if el[5] == sensorid]
if begin:
mylist = [el for el in mylist if el[1] > begin]
if end:
mylist = [el for el in mylist if el[0] < end]
#print(" -> remaining flags: {b}".format(b=len(mylist)))
return mylist
except:
return []
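# Illustrative round-trip sketch for saveflags/loadflags (path and sensor id are
# placeholders; assumes the usual flaglist layout with the SensorID at index 5):
# >>> saveflags(myflaglist, '/tmp/flags.json')
# >>> restored = loadflags('/tmp/flags.json', sensorid='LEMI025_22_0002')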
def joinStreams(stream_a,stream_b, **kwargs):
"""
DEFINITION:
Combine two streams, possibly replacing already existing time steps.
Data of stream_a will replace data of stream_b
APPLICATION
combinedstream = joinStreams(stream_a,stream_b)
"""
logger.info('joinStreams: Start joining at %s.' % str(datetime.now()))
# Check stream type and eventually convert them to ndarrays
# --------------------------------------
ndtype = False
if len(stream_a.ndarray[0]) > 0:
# Using ndarray and eventually convert stream_b to ndarray as well
ndtype = True
if not len(stream_b.ndarray[0]) > 0:
stream_b = stream_b.linestruct2ndarray()
if not len(stream_b.ndarray[0]) > 0:
return stream_a
elif len(stream_b.ndarray[0]) > 0:
ndtype = True
stream_a = stream_a.linestruct2ndarray()
if not len(stream_a.ndarray[0]) > 0:
return stream_b
else:
ndtype = True
stream_a = stream_a.linestruct2ndarray()
stream_b = stream_b.linestruct2ndarray()
if not len(stream_a.ndarray[0]) > 0 and not len(stream_b.ndarray[0]) > 0:
logger.error('joinStreams: stream(s) empty - aborting join.')
return stream_a
# non-destructive
# --------------------------------------
sa = stream_a.copy()
sb = stream_b.copy()
# Get indices of time steps of stream_b for which identical times exist in stream_a -> delete those lines
# --------------------------------------
# IMPORTANT: If two streams with different keys should be combined then "merge" is the method of choice
# NEW: shape problems when removing data -> now use removeduplicates at the end
# SHOULD WORK (already tested) as removeduplicates will keep the last value and drop earlier occurrences
#indofb = np.nonzero(np.in1d(sb.ndarray[0], sa.ndarray[0]))[0]
#for idx,elem in enumerate(sb.ndarray):
# if len(sb.ndarray[idx]) > 0:
# sb.ndarray[idx] = np.delete(sb.ndarray[idx],indofb)
# Now add stream_a to stream_b - account for possibly missing column data
# --------------------------------------
array = [[] for key in KEYLIST]
for idx,elem in enumerate(sb.ndarray):
if len(sa.ndarray[idx]) > 0 and len(sb.ndarray[idx]) > 0:
array[idx] = np.concatenate((sa.ndarray[idx],sb.ndarray[idx]))
elif not len(sa.ndarray[idx]) > 0 and len(sb.ndarray[idx]) > 0:
if idx < len(NUMKEYLIST):
fill = float('nan')
else:
fill = '-'
arraya = np.asarray([fill]*len(sa.ndarray[0]))
array[idx] = np.concatenate((arraya,sb.ndarray[idx]))
elif len(sa.ndarray[idx]) > 0 and not len(sb.ndarray[idx]) > 0:
if idx < len(NUMKEYLIST):
fill = float('nan')
else:
fill = '-'
arrayb = np.asarray([fill]*len(sb.ndarray[0]))
array[idx] = np.concatenate((sa.ndarray[idx],arrayb))
else:
array[idx] = np.asarray([])
stream = DataStream([LineStruct()],sa.header,np.asarray(array,dtype=object))
stream = stream.removeduplicates()
return stream.sorting()
def appendStreams(streamlist):
"""
DESCRIPTION:
Appends contents of streamlist and returns a single new stream.
Duplicates are removed and the new stream is sorted.
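APPLICATION (illustrative sketch):
combined = appendStreams([stream1, stream2, stream3])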
"""
array = [[] for key in KEYLIST]
for idx,key in enumerate(KEYLIST):
# Get tuple of array
arlist = []
for stream in streamlist:
if len(stream.ndarray[idx]) > 0:
array[idx].extend(stream.ndarray[idx])
stream = DataStream([LineStruct()],streamlist[0].header,np.asarray(array).astype(object))
if len(stream.ndarray[0]) > 0:
stream = stream.removeduplicates()
stream = stream.sorting()
return stream
else:
return DataStream([LineStruct()],streamlist[0].header,np.asarray([np.asarray([]) for key in KEYLIST]))
def mergeStreams(stream_a, stream_b, **kwargs):
"""
DEFINITION:
Combine the contents of two data streams relative to stream_a.
Basically three modes are possible:
1. Insert data from stream_b into stream_a based on timesteps of stream_a
- if keys are provided only these specific columns are inserted into a
- default: if data is existing in stream_a only nans are replaced
here flags (4) can be set and a comment "inserted from SensorID" is added
- optionally use get_gaps to identify missing timesteps in stream_a beforehand
2. Replace
- same as insert but here all existing time series data is replaced by
corresponding data from stream_b
3. Drop
- drops the whole column from stream_a and fills it with stream_b data
The streams need to overlap; the base stream is stream_a, whose time range
is not modified. If you want to extend this stream by new data use the extend
method.
1. replace data from specific columns of stream_a with data from stream_b.
- requires keys
2. fill gaps in stream_a data with stream_b data without replacing any data.
- extend = True
PARAMETERS:
Variables:
- stream_a (DataStream object) main stream
- stream_b (DataStream object) this stream is merged into stream_a
Kwargs:
- addall: (bool) Add all elements from stream_b
- extend: (bool) Time range of stream b is eventually added to stream a.
Default False.
If extend = true => any existing date which is not present in stream_a
will be filled by stream_b
- mode: (string) 'insert' or 'replace' or 'drop'. drop removes stream_a column, replace will change values no matter what, insert will only replace nan's (default)
- keys: (list) List of keys to add from stream_b into stream_a.
- flag: (bool) if true, a flag will be added to each merged line (default: flagid = 4, comment = "keys ... added from sensorid b").
- comment: (str) Define comment to stream_b data in stream_a.
- replace: (bool) Allows existing stream_a values to be replaced by stream_b ones.
RETURNS:
- Datastream(stream_a): (DataStream) DataStream object.
EXAMPLE:
>>> # Joining two datasets together:
>>> alldata = mergeStreams(lemidata, gsmdata, keys=['f'])
# f of gsm will be added to lemi
# inserting missing values from another stream
>>> new_gsm = mergeStreams(gsm1, gsm2, keys=['f'], mode='insert')
# all missing values (nans) of gsm1 will be filled by gsm2 values (if existing)
APPLICATION:
"""
# old (LineStruct) kwargs, to be removed
addall = kwargs.get('addall')
replace = kwargs.get('replace')
extend = kwargs.get('extend')
# new
mode = kwargs.get('mode')
flag = kwargs.get('flag')
keys = kwargs.get('keys')
comment = kwargs.get('comment')
flagid = kwargs.get('flagid')
if not mode:
mode = 'insert' # other possibilities: replace, ...
if not keys:
keys = stream_b._get_key_headers()
# Defining default comment
# --------------------------------------
headera = stream_a.header
headerb = stream_b.header
try:
sensidb = headerb['SensorID']
except:
sensidb = 'stream_b'
# Better: create a flaglist and apply stream.flag(flaglist) with flag 4
if not comment:
comment = 'keys %s added from %s' % (','.join(keys), sensidb)
if not flagid:
flagid = 4
fllst = [] # flaglist
logger.info('mergeStreams: Starting merge at %s.' % str(datetime.now()))
# Check stream type and convert to ndarrays if necessary
# --------------------------------------
ndtype = False
if len(stream_a.ndarray[0]) > 0:
# Using ndarray and converting stream_b to ndarray as well if necessary
ndtype = True
if not len(stream_b.ndarray[0]) > 0:
stream_b = stream_b.linestruct2ndarray()
elif len(stream_b.ndarray[0]) > 0:
ndtype = True
stream_a = stream_a.linestruct2ndarray()
else:
ndtype = True
stream_a = stream_a.linestruct2ndarray()
stream_b = stream_b.linestruct2ndarray()
if not (len(stream_a.ndarray[0]) > 0 and len(stream_b.ndarray[0]) > 0):
logger.error('mergeStreams: stream(s) empty - aborting merge.')
return stream_a
# non-destructive
# --------------------------------------
sa = stream_a.copy()
sb = stream_b.copy()
sa = sa.removeduplicates()
sb = sb.removeduplicates()
# Sampling rates
# --------------------------------------
sampratea = sa.samplingrate()
samprateb = sb.samplingrate()
minsamprate = min(sampratea,samprateb)
if ndtype:
timea = sa.ndarray[0]
else:
timea = sa._get_column('time')
# truncate b to time range of a
# --------------------------------------
try:
sb = sb.trim(starttime=num2date(timea[0]).replace(tzinfo=None), endtime=num2date(timea[-1]).replace(tzinfo=None)+timedelta(seconds=samprateb),newway=True)
except:
print("mergeStreams: stream_a and stream_b are apparently not overlapping - returning stream_a")
return stream_a
if ndtype:
timeb = sb.ndarray[0]
else:
timeb = sb._get_column('time')
# keeping a - changed by leon 10/2015
"""
# truncate a to range of b
# --------------------------------------
try:
sa = sa.trim(starttime=num2date(timeb[0]).replace(tzinfo=None), endtime=num2date(timeb[-1]).replace(tzinfo=None)+timedelta(seconds=sampratea),newway=True)
except:
print "mergeStreams: stream_a and stream_b are apparently not overlapping - returning stream_a"
return stream_a
# redo timea calc after trimming
# --------------------------------------
if ndtype:
timea = sa.ndarray[0]
else:
timea = sa._get_column('time')
"""
# testing overlap
# --------------------------------------
if not len(sb) > 0:
print("subtractStreams: stream_a and stream_b are not overlapping - returning stream_a")
return stream_a
timea = maskNAN(timea)
timeb = maskNAN(timeb)
orgkeys = stream_a._get_key_headers()
# master header
# --------------------------------------
header = sa.header
# just add the merged sensorid
header['SecondarySensorID'] = sensidb
## Speed up of unequal timesteps - limit search range
# - search range small (fracratio high) if t_limits are similar and data is periodic
#  - search range large (fracratio small) if t_limits differ or the data is not periodic
# - fracratio = 1 means that the full stream_b data set is searched
#  - fracratio = 20 means that +/- 5 percent of stream_b is searched around the expected index
#print("mergeStream", sa.length(), sb.length(), sa._find_t_limits(), sb._find_t_limits())
fracratio = 2 # modify if start and endtime are different
speedup = True
if speedup and ndtype:
ast, aet = sa._find_t_limits()
bst, bet = sb._find_t_limits()
uncert = (date2num(aet)-date2num(ast))*0.01
#print ("Merge speedup", uncert, ast, aet, bst, bet)
if not bst < ast+timedelta(minutes=uncert*24*60):
print ("Merge: Starttime of stream_b too large")
for indx,key in enumerate(KEYLIST):
if key == 'time':
### Changes from 2019-01-15: modified axis handling - originally working fine, except for saggitarius
#sb.ndarray[0] = np.append(np.asarray([date2num(ast)]), sb.ndarray[0],1)
sb.ndarray[0] = np.append(np.asarray([date2num(ast)]), sb.ndarray[0])
elif key == 'sectime' or key in NUMKEYLIST:
if not len(sb.ndarray[indx]) == 0:
#sb.ndarray[indx] = np.append(np.asarray([np.nan]),sb.ndarray[indx],1)
sb.ndarray[indx] = np.append(np.asarray([np.nan]), sb.ndarray[indx])
"""Probablistic forecast error metrics."""
import numpy as np
def brier_score(obs, fx, fx_prob):
"""Brier Score (BS).
BS = 1/n sum_{i=1}^n (f_i - o_i)^2
where n is the number of forecasts, f_i is the forecasted probability of
event i, and o_i is the observed event indicator (o_i=0: event did not
occur, o_i=1: event occured). The forecasts are supplied as the
right-hand-side of a CDF interval, e.g., forecast <= 10 MW at time i, and
therefore o_i is defined as:
o_i = 1 if obs_i <= fx_i, else o_i = 0
where fx_i and obs_i are the forecast and observation at time i,
respectively.
Parameters
----------
obs : (n,) array_like
Observations (physical unit).
fx : (n,) array_like
Forecasts (physical units) of the right-hand-side of a CDF interval,
e.g., fx = 10 MW is interpreted as forecasting <= 10 MW.
fx_prob : (n,) array_like
Probability [%] associated with the forecasts.
Returns
-------
bs : float
The Brier Score [unitless], bounded between 0 and 1, where values
closer to 0 indicate better forecast performance and values closer to 1
indicate worse performance.
Notes
-----
The Brier Score implemented in this function is for binary outcomes only,
rather than the more general (but less commonly used) categorical version.
"""
# event: 0=did not happen, 1=did happen
o = np.where(obs <= fx, 1.0, 0.0)
# forecast probabilities [unitless]
f = fx_prob / 100.0
bs = np.mean((f - o) ** 2)
return bs
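# Minimal usage sketch for brier_score() with made-up numbers: three forecasts
# of "power <= 10 MW" issued at 80% probability against observed 8, 12, 9 MW.
def _example_brier_score():
    obs = np.array([8.0, 12.0, 9.0])        # observations [MW]
    fx = np.array([10.0, 10.0, 10.0])       # right-hand side of CDF interval [MW]
    fx_prob = np.array([80.0, 80.0, 80.0])  # forecast probability [%]
    # events o = [1, 0, 1]; BS = (0.2**2 + 0.8**2 + 0.2**2) / 3 = 0.24
    return brier_score(obs, fx, fx_prob)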
def brier_skill_score(obs, fx, fx_prob, ref, ref_prob):
"""Brier Skill Score (BSS).
BSS = 1 - BS_fx / BS_ref
where BS_fx is the Brier Score of the evaluated forecast and BS_ref is the
Brier Score of a reference forecast.
Parameters
----------
obs : (n,) array_like
Observations (physical unit).
fx : (n,) array_like
Forecasts (physical units) of the right-hand-side of a CDF interval,
e.g., fx = 10 MW is interpreted as forecasting <= 10 MW.
fx_prob : (n,) array_like
Probability [%] associated with the forecasts.
ref : (n,) array_like
Reference forecast (physical units) of the right-hand-side of a CDF
interval.
ref_prob : (n,) array_like
Probability [%] associated with the reference forecast.
Returns
-------
skill : float
The Brier Skill Score [unitless].
"""
bs_fx = brier_score(obs, fx, fx_prob)
bs_ref = brier_score(obs, ref, ref_prob)
skill = 1.0 - bs_fx / bs_ref
return skill
def quantile_score(obs, fx, fx_prob):
"""Quantile Score (QS).
.. math::
\\text{QS} = \\frac{1}{n} \\sum_{i=1}^n (fx_i - obs_i) * (p - 1\\{obs_i > fx_i\\})
where :math:`n` is the number of forecasts, :math:`obs_i` is an
observation, :math:`fx_i` is a forecast, :math:`1\\{obs_i > fx_i\\}` is an
indicator function (1 if :math:`obs_i > fx_i`, 0 otherwise) and :math:`p`
is the probability that :math:`obs_i <= fx_i`. [1]_ [2]_
If :math:`obs > fx`, then we have:
.. math::
(fx - obs) < 0 \\\\
(p - 1\\{obs > fx\\}) = (p - 1) <= 0 \\\\
(fx - obs) * (p - 1) >= 0
If instead :math:`obs < fx`, then we have:
.. math::
(fx - obs) > 0 \\\\
(p - 1\\{obs > fx\\}) = (p - 0) >= 0 \\\\
(fx - obs) * p >= 0
Therefore, the quantile score is non-negative regardless of the obs and fx.
Parameters
----------
obs : (n,) array_like
Observations (physical unit).
fx : (n,) array_like
Forecasts (physical units) of the right-hand-side of a CDF interval,
e.g., fx = 10 MW is interpreted as forecasting <= 10 MW.
fx_prob : (n,) array_like
Probability [%] associated with the forecasts.
Returns
-------
qs : float
The Quantile Score, with the same units as the observations.
Notes
-----
Quantile score is meant to be computed for a single probability of
:math:`n` samples.
Examples
--------
>>> obs = 100 # observation [MW]
>>> fx = 80 # forecast [MW]
>>> fx_prob = 60 # probability [%]
>>> quantile_score(obs, fx, fx_prob) # score [MW]
8.0
References
----------
.. [1] <NAME> <NAME>. (1978) "Regression Quantiles", Econometrica
46 (1), pp. 33-50. doi: 10.2307/1913643
.. [2] Wilks (2020) "Forecast Verification". In "Statistical Methods in the
Atmospheric Sciences" (3rd edition). Academic Press. ISBN: 9780123850225
""" # NOQA: E501,W605
# Prob(obs <= fx) = p
p = fx_prob / 100.0
qs = np.mean((fx - obs) * (p - np.where(obs > fx, 1.0, 0.0)))
return qs
def quantile_skill_score(obs, fx, fx_prob, ref, ref_prob):
"""Quantile Skill Score (QSS).
.. math::
\\text{QSS} = 1 - \\text{QS}_{\\text{fx}} / \\text{QS}_{\\text{ref}}
where :math:`\\text{QS}_{\\text{fx}}` is the Quantile Score of the
evaluated forecast and :math:`\\text{QS}_{\\text{ref}}` is the Quantile
Score of a reference forecast. [1]_
Parameters
----------
obs : (n,) array_like
Observations (physical unit).
fx : (n,) array_like
Forecasts (physical units) of the right-hand-side of a CDF interval,
e.g., fx = 10 MW is interpreted as forecasting <= 10 MW.
fx_prob : (n,) array_like
Probability [%] associated with the forecasts.
ref : (n,) array_like
Reference forecast (physical units) of the right-hand-side of a CDF
interval.
ref_prob : (n,) array_like
Probability [%] associated with the reference forecast.
Returns
-------
skill : float
The Quantile Skill Score [unitless].
References
----------
.. [1] Bouallegue, <NAME> Friederichs (2015) "Quantile forecast
discrimination ability and value", Quarterly Journal of the Royal
Meteorological Society 141, pp. 3415-3424. doi: 10.1002/qj.2624
Notes
-----
This function returns 0 if QS_fx and QS_ref are both 0.
See Also
--------
:py:func:`solarforecastarbiter.metrics.probabilistic.quantile_score`
"""
qs_fx = quantile_score(obs, fx, fx_prob)
qs_ref = quantile_score(obs, ref, ref_prob)
# avoid 0 / 0 --> nan
if qs_fx == qs_ref:
return 0.0
elif qs_ref == 0.0:
# avoid divide by 0
# typically caused by deadbands and short time periods
return np.NINF
else:
return 1.0 - qs_fx / qs_ref
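# Minimal usage sketch for quantile_skill_score() with made-up numbers: the
# evaluated forecast is compared against a constant reference forecast at the
# same 60% probability level.
def _example_quantile_skill_score():
    obs = np.array([100.0, 85.0, 70.0])  # observations [MW]
    fx = np.array([90.0, 80.0, 75.0])    # evaluated forecast [MW]
    ref = np.array([60.0, 60.0, 60.0])   # reference forecast [MW]
    prob = np.full(3, 60.0)              # probability [%] for both forecasts
    # QS_fx = 3 MW, QS_ref = 10 MW  =>  QSS = 1 - 3/10 = 0.7
    return quantile_skill_score(obs, fx, prob, ref, prob)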
def _unique_forecasts(f):
"""Convert forecast probabilities to a set of unique values.
Determine a set of unique forecast probabilities, based on input forecast
probabilities of arbitrary precision, and approximate the input
probabilities to lie within the set of unique values.
Parameters
----------
f : (n,) array_like
Probability [unitless] associated with the forecasts.
Returns
-------
f_uniq : (n,) array_like
The converted forecast probabilities [unitless].
Notes
-----
This implementation determines the set of unique forecast probabilities by
rounding the input probabilities to a precision determined by the number of
input probability values: if less than 1000 samples, bin by tenths;
otherwise bin by hundredths.
Examples
--------
>>> f = np.array([0.1234, 0.156891, 0.10561])
>>> _unique_forecasts(f)
array([0.1, 0.2, 0.1])
"""
if len(f) >= 1000:
n_decimals = 2 # bin by hundredths (0.01, 0.02, etc.)
else:
n_decimals = 1 # bin by tenths (0.1, 0.2, etc.)
f_uniq = np.around(f, decimals=n_decimals)
return f_uniq
def brier_decomposition(obs, fx, fx_prob):
"""The 3-component decomposition of the Brier Score.
BS = REL - RES + UNC
where REL is the reliability, RES is the resolution and UNC is the
uncertainty.
Parameters
----------
obs : (n,) array_like
Observations (physical unit).
fx : (n,) array_like
Forecasts (physical units) of the right-hand-side of a CDF interval,
e.g., fx = 10 MW is interpreted as forecasting <= 10 MW.
fx_prob : (n,) array_like
Probability [%] associated with the forecasts.
Returns
-------
rel : float
The reliability of the forecast [unitless], where a perfectly reliable
forecast has value of 0.
res : float
The resolution of the forecast [unitless], where higher values are
better.
unc : float
The uncertainty [unitless], where lower values indicate the event being
forecasted occurs rarely.
Notes
-----
The current implementation iterates over the unique forecasts to compute
the reliability and resolution, rather than using a vectorized formulation.
While a vectorized formulation may be more computationally efficient, the
clarity of the iterate version outweighs the efficiency gains from the
vectorized version. Additionally, the number of unique forecasts is
currently capped at 100, which is small enough that there is likely no
practical difference in computation time between the iterate vs vectorized
versions.
"""
# event: 0=did not happen, 1=did happen
o = np.where(obs <= fx, 1.0, 0.0)
# forecast probabilities [unitless]
f = fx_prob / 100.0
# get unique forecast probabilities by binning
f = _unique_forecasts(f)
# reliability and resolution
rel, res = 0.0, 0.0
o_avg = np.mean(o)
for f_i, N_i in np.nditer(np.unique(f, return_counts=True)):
o_i = np.mean(o[f == f_i]) # mean event value per set
rel += N_i * (f_i - o_i) ** 2
res += N_i * (o_i - o_avg) ** 2
rel /= len(f)
res /= len(f)
# uncertainty
base_rate = np.mean(o)
unc = base_rate * (1.0 - base_rate)
return rel, res, unc
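# Minimal usage sketch for brier_decomposition() with made-up numbers; because
# the probabilities are already multiples of 0.1, REL - RES + UNC reproduces
# the Brier Score exactly here.
def _example_brier_decomposition():
    obs = np.array([8.0, 12.0, 9.0, 11.0])        # observations [MW]
    fx = np.full(4, 10.0)                         # CDF interval: <= 10 MW
    fx_prob = np.array([80.0, 30.0, 70.0, 40.0])  # probability [%]
    rel, res, unc = brier_decomposition(obs, fx, fx_prob)
    # rel = 0.095, res = 0.25, unc = 0.25 -> rel - res + unc = 0.095 = BS
    return rel - res + unc, brier_score(obs, fx, fx_prob)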
def reliability(obs, fx, fx_prob):
"""Reliability (REL) of the forecast.
REL = 1/n sum_{i=1}^I N_i (f_i - o_{i,avg})^2
where n is the total number of forecasts, I is the number of unique
forecasts (f_1, f_2, ..., f_I), N_i is the number of times each unique
forecast occurs, o_{i,avg} is the average of the observed events during
which the forecast was f_i.
Parameters
----------
obs : (n,) array_like
Observations (physical unit).
fx : (n,) array_like
Forecasts (physical units) of the right-hand-side of a CDF interval,
e.g., fx = 10 MW is interpreted as forecasting <= 10 MW.
fx_prob : (n,) array_like
Probability [%] associated with the forecasts.
Returns
-------
rel : float
The reliability of the forecast [unitless], where a perfectly reliable
forecast has value of 0.
See Also
--------
brier_decomposition : 3-component decomposition of the Brier Score
"""
rel = brier_decomposition(obs, fx, fx_prob)[0]
return rel
def resolution(obs, fx, fx_prob):
"""Resolution (RES) of the forecast.
RES = 1/n sum_{i=1}^I N_i (o_{i,avg} - o_{avg})^2
where n is the total number of forecasts, I is the number of unique
forecasts (f_1, f_2, ..., f_I), N_i is the number of times each unique
forecast occurs, o_{i,avg} is the average of the observed events during
which the forecast was f_i, and o_{avg} is the average of all observed
events.
Parameters
----------
obs : (n,) array_like
Observations (physical unit).
fx : (n,) array_like
Forecasts (physical units) of the right-hand-side of a CDF interval,
e.g., fx = 10 MW is interpreted as forecasting <= 10 MW.
fx_prob : (n,) array_like
Probability [%] associated with the forecasts.
Returns
-------
res : float
The resolution of the forecast [unitless], where higher values are
better.
See Also
--------
brier_decomposition : 3-component decomposition of the Brier Score
"""
res = brier_decomposition(obs, fx, fx_prob)[1]
return res
def uncertainty(obs, fx, fx_prob):
"""Uncertainty (UNC) of the forecast.
UNC = base_rate * (1 - base_rate)
where base_rate = 1/n sum_{i=1}^n o_i, and o_i is the observed event.
Parameters
----------
obs : (n,) array_like
Observations (physical unit).
fx : (n,) array_like
Forecasts (physical units) of the right-hand-side of a CDF interval,
e.g., fx = 10 MW is interpreted as forecasting <= 10 MW.
fx_prob : (n,) array_like
Probability [%] associated with the forecasts.
Returns
-------
unc : float
The uncertainty [unitless], where lower values indicate the event being
forecasted occurs rarely.
See Also
--------
brier_decomposition : 3-component decomposition of the Brier Score
"""
unc = brier_decomposition(obs, fx, fx_prob)[2]
return unc
def sharpness(fx_lower, fx_upper):
"""Sharpness (SH).
SH = 1/n sum_{i=1}^n (f_{u,i} - f_{l,i})
where n is the total number of forecasts, f_{u,i} is the upper prediction
interval value and f_{l,i} is the lower prediction interval value for
sample i.
Parameters
----------
fx_lower : (n,) array_like
The lower prediction interval values (physical units).
fx_upper : (n,) array_like
The upper prediction interval values (physical units).
Returns
-------
SH : float
The sharpness (physical units), where smaller sharpness values indicate
"tighter" prediction intervals.
"""
sh = np.mean(fx_upper - fx_lower)
return sh
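# Minimal usage sketch for sharpness() with made-up prediction intervals.
def _example_sharpness():
    fx_lower = np.array([8.0, 9.0, 9.5])     # lower interval bounds [MW]
    fx_upper = np.array([12.0, 11.0, 10.5])  # upper interval bounds [MW]
    # mean interval width = (4 + 2 + 1) / 3 = 2.33... MW
    return sharpness(fx_lower, fx_upper)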
def continuous_ranked_probability_score(obs, fx, fx_prob):
"""Continuous Ranked Probability Score (CRPS).
.. math::
\\text{CRPS} = \\frac{1}{n} \\sum_{i=1}^n \\int_{-\\infty}^{\\infty}
(F_i(x) - \\mathbf{1} \\{x \\geq y_i \\})^2 dx
where :math:`F_i(x)` is the CDF of the forecast at time :math:`i`,
:math:`y_i` is the observation at time :math:`i`, and :math:`\\mathbf{1}`
is the indicator function that transforms the observation into a step
function (1 if :math:`x \\geq y`, 0 if :math:`x < y`). In other words, the
CRPS measures the difference between the forecast CDF and the empirical CDF
of the observation. The CRPS has the same units as the observation. Lower
CRPS values indicate more accurate forecasts, where a CRPS of 0 indicates a
perfect forecast. [1]_ [2]_ [3]_
Parameters
----------
obs : (n,) array_like
Observations (physical unit).
fx : (n, d) array_like
Forecasts (physical units) of the right-hand-side of a CDF with d
intervals (d >= 2), e.g., fx = [10 MW, 20 MW, 30 MW] is interpreted as
<= 10 MW, <= 20 MW, <= 30 MW.
fx_prob : (n, d) array_like
Probability [%] associated with the forecasts.
Returns
-------
crps : float
The Continuous Ranked Probability Score, with the same units as the
observation.
Raises
------
ValueError
If the forecasts have incorrect dimensions; either a) the forecasts are
for a single sample (n=1) with d CDF intervals but are given as a 1D
array with d values or b) the forecasts are given as 2D arrays (n,d)
but do not contain at least 2 CDF intervals (i.e. d < 2).
Notes
-----
The CRPS can be calculated analytically when the forecast CDF is of a
continuous parametric distribution, e.g., Gaussian distribution. However,
since the Solar Forecast Arbiter makes no assumptions regarding how a
probabilistic forecast was generated, the CRPS is instead calculated using
numerical integration of the discretized forecast CDF. Therefore, the
accuracy of the CRPS calculation is limited by the precision of the
forecast CDF. In practice, this means the forecast CDF should 1) consist of
at least 10 intervals and 2) cover probabilities from 0% to 100%.
References
----------
.. [1] <NAME> (1976) "Scoring rules for continuous
probability distributions." Management Science, vol. 22, pp.
1087-1096. doi: 10.1287/mnsc.22.10.1087
.. [2] Hersbach (2000) "Decomposition of the continuous ranked probability
score for ensemble prediction systems." Weather Forecast, vol. 15,
pp. 559-570. doi: 10.1175/1520-0434(2000)015<0559:DOTCRP>2.0.CO;2
.. [3] Wilks (2019) "Statistical Methods in the Atmospheric Sciences", 4th
ed. Oxford; Waltham, MA; Academic Press.
"""
# match observations to fx shape: (n,) => (n, d)
if np.ndim(fx) < 2:
raise ValueError("forecasts must be 2D arrays (expected (n,d), got"
f"{np.shape(fx)})")
elif np.shape(fx)[1] < 2:
raise ValueError("forecasts must have d >= 2 CDF intervals "
f"(expected >= 2, got {np.shape(fx)[1]})")
n = len(fx)
# extend CDF min to ensure obs within forecast support
# fx.shape = (n, d) ==> (n, d + 1)
fx_min = np.minimum(obs, fx[:, 0])
fx = np.hstack([fx_min[:, np.newaxis], fx])
fx_prob = np.hstack([np.zeros([n, 1]), fx_prob])
"""PyMC3-ArviZ conversion code."""
import logging
import warnings
from typing import ( # pylint: disable=unused-import
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Tuple,
Union,
)
import numpy as np
import xarray as xr
from aesara.graph.basic import Constant
from aesara.tensor.sharedvar import SharedVariable
from aesara.tensor.subtensor import AdvancedIncSubtensor
from arviz import InferenceData, concat, rcParams
from arviz.data.base import CoordSpec, DimSpec
from arviz.data.base import dict_to_dataset as _dict_to_dataset
from arviz.data.base import generate_dims_coords, make_attrs, requires
import pymc3
from pymc3.aesaraf import extract_obs_data
from pymc3.distributions import logpt
from pymc3.model import modelcontext
from pymc3.util import get_default_varnames
if TYPE_CHECKING:
from typing import Set # pylint: disable=ungrouped-imports
from pymc3.backends.base import MultiTrace # pylint: disable=invalid-name
from pymc3.model import Model
__all__ = [""]
_log = logging.getLogger("pymc3")
# random variable object ...
Var = Any # pylint: disable=invalid-name
class _DefaultTrace:
"""
Utility for collecting samples into a dictionary.
Name comes from its similarity to ``defaultdict``:
entries are lazily created.
Parameters
----------
samples : int
The number of samples that will be collected, per variable,
into the trace.
Attributes
----------
trace_dict : Dict[str, np.ndarray]
A dictionary constituting a trace. Should be extracted
after a procedure has filled the `_DefaultTrace` using the
`insert()` method
"""
trace_dict: Dict[str, np.ndarray] = {}
_len: Optional[int] = None
def __init__(self, samples: int):
self._len = samples
self.trace_dict = {}
def insert(self, k: str, v, idx: int):
"""
Insert `v` as the value of the `idx`th sample for the variable `k`.
Parameters
----------
k: str
Name of the variable.
v: anything that can go into a numpy array (including a numpy array)
The value of the `idx`th sample from variable `k`
idx: int
The index of the sample we are inserting into the trace.
"""
value_shape = np.shape(v)
# initialize if necessary
if k not in self.trace_dict:
array_shape = (self._len,) + value_shape
self.trace_dict[k] = np.empty(array_shape, dtype=np.array(v).dtype)
# do the actual insertion
if value_shape == ():
self.trace_dict[k][idx] = v
else:
self.trace_dict[k][idx, :] = v
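# Minimal usage sketch for _DefaultTrace; the variable name "mu" and the
# sample values are illustrative only.
def _example_default_trace():
    strace = _DefaultTrace(samples=3)
    for idx in range(3):
        strace.insert("mu", np.array([0.1 * idx, 0.2 * idx]), idx)
    return strace.trace_dict["mu"]  # ndarray of shape (3, 2)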
def dict_to_dataset(
data,
library=None,
coords=None,
dims=None,
attrs=None,
default_dims=None,
skip_event_dims=None,
index_origin=None,
):
"""Temporal workaround for dict_to_dataset.
Once ArviZ>0.11.2 release is available, only two changes are needed for everything to work.
1) this should be deleted, 2) dict_to_dataset should be imported as is from arviz, no underscore,
also remove unnecessary imports
"""
if default_dims is None:
return _dict_to_dataset(
data, library=library, coords=coords, dims=dims, skip_event_dims=skip_event_dims
)
else:
out_data = {}
for name, vals in data.items():
vals = np.atleast_1d(vals)
val_dims = dims.get(name)
val_dims, coords = generate_dims_coords(vals.shape, name, dims=val_dims, coords=coords)
coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}
out_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)
return xr.Dataset(data_vars=out_data, attrs=make_attrs(library=library))
class InferenceDataConverter: # pylint: disable=too-many-instance-attributes
"""Encapsulate InferenceData specific logic."""
model = None # type: Optional[Model]
nchains = None # type: int
ndraws = None # type: int
posterior_predictive = None # Type: Optional[Mapping[str, np.ndarray]]
predictions = None # Type: Optional[Mapping[str, np.ndarray]]
prior = None # Type: Optional[Mapping[str, np.ndarray]]
def __init__(
self,
*,
trace=None,
prior=None,
posterior_predictive=None,
log_likelihood=True,
predictions=None,
coords: Optional[CoordSpec] = None,
dims: Optional[DimSpec] = None,
model=None,
save_warmup: Optional[bool] = None,
density_dist_obs: bool = True,
index_origin: Optional[int] = None,
):
self.save_warmup = rcParams["data.save_warmup"] if save_warmup is None else save_warmup
self.trace = trace
# this permits us to get the model from command-line argument or from with model:
self.model = modelcontext(model)
self.attrs = None
if trace is not None:
self.nchains = trace.nchains if hasattr(trace, "nchains") else 1
if hasattr(trace.report, "n_draws") and trace.report.n_draws is not None:
self.ndraws = trace.report.n_draws
self.attrs = {
"sampling_time": trace.report.t_sampling,
"tuning_steps": trace.report.n_tune,
}
else:
self.ndraws = len(trace)
if self.save_warmup:
warnings.warn(
"Warmup samples will be stored in posterior group and will not be"
" excluded from stats and diagnostics."
" Do not slice the trace manually before conversion",
UserWarning,
)
self.ntune = len(self.trace) - self.ndraws
self.posterior_trace, self.warmup_trace = self.split_trace()
else:
self.nchains = self.ndraws = 0
self.prior = prior
self.posterior_predictive = posterior_predictive
self.log_likelihood = log_likelihood
self.predictions = predictions
self.index_origin = rcParams["data.index_origin"] if index_origin is None else index_origin
def arbitrary_element(dct: Dict[Any, np.ndarray]) -> np.ndarray:
return next(iter(dct.values()))
if trace is None:
# if you have a posterior_predictive built with keep_dims,
# you'll lose here, but there's nothing I can do about that.
self.nchains = 1
get_from = None
if predictions is not None:
get_from = predictions
elif posterior_predictive is not None:
get_from = posterior_predictive
elif prior is not None:
get_from = prior
if get_from is None:
# pylint: disable=line-too-long
raise ValueError(
"When constructing InferenceData must have at least"
" one of trace, prior, posterior_predictive or predictions."
)
aelem = arbitrary_element(get_from)
self.ndraws = aelem.shape[0]
self.coords = {} if coords is None else coords
if hasattr(self.model, "coords"):
self.coords = {**self.model.coords, **self.coords}
self.coords = {key: value for key, value in self.coords.items() if value is not None}
self.dims = {} if dims is None else dims
if hasattr(self.model, "RV_dims"):
model_dims = {
var_name: [dim for dim in dims if dim is not None]
for var_name, dims in self.model.RV_dims.items()
}
self.dims = {**model_dims, **self.dims}
self.density_dist_obs = density_dist_obs
self.observations = self.find_observations()
def find_observations(self) -> Optional[Dict[str, Var]]:
"""If there are observations available, return them as a dictionary."""
if self.model is None:
return None
observations = {}
for obs in self.model.observed_RVs:
aux_obs = getattr(obs.tag, "observations", None)
if aux_obs is not None:
try:
obs_data = extract_obs_data(aux_obs)
observations[obs.name] = obs_data
except TypeError:
warnings.warn(f"Could not extract data from symbolic observation {obs}")
else:
warnings.warn(f"No data for observation {obs}")
return observations
def split_trace(self) -> Tuple[Union[None, "MultiTrace"], Union[None, "MultiTrace"]]:
"""Split MultiTrace object into posterior and warmup.
Returns
-------
trace_posterior: MultiTrace or None
The slice of the trace corresponding to the posterior. If the posterior
trace is empty, None is returned
trace_warmup: MultiTrace or None
The slice of the trace corresponding to the warmup. If the warmup trace is
empty or ``save_warmup=False``, None is returned
"""
trace_posterior = None
trace_warmup = None
if self.save_warmup and self.ntune > 0:
trace_warmup = self.trace[: self.ntune]
if self.ndraws > 0:
trace_posterior = self.trace[self.ntune :]
return trace_posterior, trace_warmup
def log_likelihood_vals_point(self, point, var, log_like_fun):
"""Compute log likelihood for each observed point."""
# TODO: This is a cheap hack; we should filter-out the correct
# variables some other way
point = {i.name: point[i.name] for i in log_like_fun.f.maker.inputs if i.name in point}
log_like_val = np.atleast_1d(log_like_fun(point))
if isinstance(var.owner.op, AdvancedIncSubtensor):
try:
obs_data = extract_obs_data(var.tag.observations)
except TypeError:
warnings.warn(f"Could not extract data from symbolic observation {var}")
mask = obs_data.mask
if np.ndim(mask) > np.ndim(log_like_val):
    mask = np.any(mask, axis=-1)
log_like_val = np.where(mask, np.nan, log_like_val)
return log_like_val
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Plots
plotrange, Btau, Ctau, ellipse, SUE
plotool:
set_clib, set_fig, set_ax,
reset_handles, append_handles, get_handles, set_legend,
plot, eplot, save, show, close
pplot(plotool):
add_plot, add_legend
"""
import warnings
from astropy import units as u
import numpy as np
from scipy import optimize
# import matplotlib as mpl
from matplotlib.ticker import (
NullFormatter, ScalarFormatter, LogFormatter,
LogFormatterExponent, LogFormatterSciNotation,
PercentFormatter
)
import matplotlib.colors as mplc
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
## Local
from utilities import InputError, merge_aliases
from arrays import arrayize, ramp
# cmap = mpl.cm.viridis
# norm = mpl.colors.Normalize(vmin=0, vmax=1)
##------------------------------
## Automatic plot range setting
##------------------------------
def plotrange(x,y,xran=None,yran=None,xlog=False,ylog=False,mask=None, \
errx=None,erry=None,xisln=False,yisln=False):
'''
Automatically sets the x and y ranges for (X,Y) plots, based on the entered
data set.
Copyright: <NAME>
'''
# Check the input
N = np.size(x)
if (np.size(y) != N):
UT.strike('plotrange','x and y should have the same size.')
xran = arrayize(xran,N=2)
yran = arrayize(yran,N=2)
# X error bar settings
if errx is not None:
if (np.isscalar(errx)): errx = np.array([errx])
sex = np.shape(errx)
if (len(sex) == 2):
if (sex != (2,N) ): UT.strike('plotrange','wrong size for errx.')
elif (len(sex) == 1):
if (sex != (N,) ): UT.strike('plotrange','wrong size for errx.')
errx = np.array([errx,errx])
else:
errx = np.zeros((2,N))
# Y error bar settings
if erry is not None:
if (np.isscalar(erry)): erry = np.array([erry])
sey = np.shape(erry)
if (len(sey) == 2):
if (sey != (2,N) ): UT.strike('plotrange','wrong size for erry.')
elif (len(sey) == 1):
if (sey != (N,) ): UT.strike('plotrange','wrong size for erry.')
erry = np.array([erry,erry])
else:
erry = np.zeros((2,N))
# Homogenize the arrays and account for errors
xlow = np.array(x,dtype=float).flatten() - errx[0,:]
xhigh = xlow + errx[1,:]
ylow = np.array(y,dtype=float).flatten() - erry[0,:]
yhigh = ylow + erry[1,:]
# Lin/Log
if (xisln): xlow, xhigh = np.exp(xlow), np.exp(xhigh)
if (yisln): ylow, yhigh = np.exp(ylow), np.exp(yhigh)
# Mask
mask = arrayize(mask,default=True,N=N)
if (xlog): mask = ( mask & (xlow > 0) & (xhigh > 0) )
if (ylog): mask = ( mask & (ylow > 0) & (yhigh > 0) )
if (xran[0] != None): mask = ( mask & (xlow >= xran[0]) )
if (xran[1] != None): mask = ( mask & (xhigh <= xran[1]) )
if (yran[0] != None): mask = ( mask & (ylow >= yran[0]) )
if (yran[1] != None): mask = ( mask & (yhigh <= yran[1]) )
# Plain range
xran = np.array([ np.min(xlow[mask]), np.max(xhigh[mask]) ])
yran = np.array([ np.min(ylow[mask]), np.max(yhigh[mask]) ])
# Add aesthetical margins
fracmarg = 0.03
if (not xlog):
dxr = xran[1] - xran[0]
xran += ( dxr*(-fracmarg), dxr*fracmarg )
else:
dxr = xran[1] / xran[0]
xran *= ( dxr**(-fracmarg), dxr**fracmarg )
if (not ylog):
dyr = yran[1] - yran[0]
yran += ( dyr*(-fracmarg), dyr*fracmarg )
else:
dyr = yran[1] / yran[0]
yran *= ( dyr**(-fracmarg), dyr**fracmarg )
# Output
return(xran,yran)
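# Minimal usage sketch for plotrange() with made-up data: a log x-axis and
# symmetric y error bars of 0.5 per point.
def _example_plotrange():
    x = np.array([1., 2., 5., 10.])
    y = np.array([3., 7., 2., 9.])
    xran, yran = plotrange(x, y, xlog=True, erry=np.full(4, 0.5))
    return xran, yran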
##-----------------------------------------------
##
## Plotting functions for ellipses and SUEs
##
## (Copyright: <NAME>)
##
##-----------------------------------------------
## Function for SUEs
def Btau(tau):
return( (1-2/np.pi)*(tau-1)**2 + tau )
## Function for SUEs
def Ctau(tau):
return( ( (4/np.pi-1)*tau**2 + (3-8/np.pi)*tau + 4/np.pi-1 ) * (tau-1) )
## Ellipse
def ellipse(xmean=None,ymean=None,xstdev=None,ystdev=None,rho=None, \
xmin=None,xmax=None,ymin=None,ymax=None,Npt=300, \
xisln=False,yisln=False):
"""
UNCERTAINTY ELLIPSES
Function to plot uncertainty ellipses (or 1 sigma contour of a bivariate
normal distribution). The parameters are the means (xmean,ymean), the
standard deviations (xstdev,ystdev) and the correlation coefficients (rho).
The optional bounds (xmin,xmax,ymin,ymax) have the effect of truncating the
ellipses in case there is a range of parameter space that is forbidden.
It is important to notice that the xisln/yisln parameters are not related to
the log settings of the axes where we plot the ellipse, but are here to
indicate that the moments of the variable to plot correspond to the natural
logarithm (ln) of the variable we want to display. For instance, for
displaying the ellipses of (x,y) where, for x, the moments are those of lnx,
we would write:
ellipse(xmean=mean_of_lnx,ymean=mean_of_y,xstdev=stdev_of_lnx, \
ystdev=stdev_of_y,rho=correl_coeff_of_lnx_and_y,xisln=True)
"""
x = ramp(x0=xmean-xstdev*(1-1.E-5),x1=xmean+xstdev*(1-1.E-5),N=Npt)
c1 = rho * (x-xmean)/xstdev
c2 = np.sqrt( (1-rho**2) * (1-(x-xmean)**2/xstdev**2) )
y1 = ystdev * ( c1 - c2 ) + ymean
y2 = ystdev * ( c1 + c2 ) + ymean
xplot = np.concatenate((x,x[::-1],[x[0]]))
yplot = np.concatenate((y1,y2[::-1],[y1[0]]))
if (xisln): xplot = np.exp(xplot)
if (yisln): yplot = np.exp(yplot)
if (xmin != None): xplot[xplot < xmin] = xmin
if (xmax != None): xplot[xplot > xmax] = xmax
if (ymin != None): yplot[yplot < ymin] = ymin
if (ymax != None): yplot[yplot > ymax] = ymax
return(xplot,yplot)
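# Minimal usage sketch for ellipse() with made-up moments of two correlated
# parameters; the returned 1-sigma contour is drawn with matplotlib.
def _example_ellipse():
    xpl, ypl = ellipse(xmean=1.0, ymean=2.0, xstdev=0.3, ystdev=0.5, rho=0.6)
    fig, ax = plt.subplots()
    ax.plot(xpl, ypl)
    return fig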
## SUEs (1 sigma contour of a bivariate split-normal distribution)
def SUE(xmean=None,ymean=None,xstdev=None,ystdev=None,rho=None, \
xskew=None,yskew=None,xmin=None,xmax=None,ymin=None,ymax=None, \
Npt=300,xisln=False,yisln=False):
"""
SKEWED UNCERTAINTY ELLIPSES (SUE)
Function to plot uncertainty SUEs (or 1 sigma contour of a bivariate
split-normal distribution). The parameters are the means (xmean,ymean), the
standard deviations (xstdev,ystdev), the skewnesses (xskew,yskew) and the
correlation coefficients (rho). The optional bounds (xmin,xmax,ymin,ymax)
have the effect of truncating the SUEs in case there is a range of
parameter space that is forbidden.
It is important to notice that the xisln/yisln parameters are not related to
the log settings of the axes where we plot the SUE, but are here to
indicate that the moments of the variable to plot correspond to the natural
logarithm (ln) of the variable we want to display. For instance, for
displaying the ellipses of (x,y) where, for x, the moments are those of lnx,
we would write:
SUE(xmean=mean_of_lnx,ymean=mean_of_y,xstdev=stdev_of_lnx, \
ystdev=stdev_of_y,xskew=skewness_of_lnx,yskew=skewness_of_y, \
rho=correl_coeff_of_lnx_and_y,xisln=True)
"""
# Rotation angle
theta = 1./2 * np.arctan( 2*rho*xstdev*ystdev / (xstdev**2-ystdev**2) )
# Numerically solve for taux and tauy (tau=1.D2 ==> skew=0.99)
taugrid = ramp(N=10000,x0=1.E-2,x1=1.E2,log=True)
Ax = np.sqrt(np.pi/2) \
* ( (np.cos(theta))**3*xskew*xstdev**3 \
+ (np.sin(theta))**3*yskew*ystdev**3 ) \
/ ( (np.sin(theta))**6 + (np.cos(theta))**6 ) \
* ( ( (np.cos(theta))**2 - (np.sin(theta))**2 ) \
/ ( (np.cos(theta))**2*xstdev**2 \
- (np.sin(theta))**2*ystdev**2 ) )**1.5
Ay = np.sqrt(np.pi/2) \
* ( (np.cos(theta))**3*yskew*ystdev**3 \
- (np.sin(theta))**3*xskew*xstdev**3 ) \
/ ( (np.cos(theta))**6 + (np.sin(theta))**6 ) \
* ( ( (np.cos(theta))**2 - (np.sin(theta))**2 ) \
/ ( (np.cos(theta))**2*ystdev**2 \
- (np.sin(theta))**2*xstdev**2 ) )**1.5
taux = np.exp(np.interp(Ax,Ctau(taugrid)/(Btau(taugrid))**1.5, \
np.log(taugrid)))
tauy = np.exp(np.interp(Ay,Ctau(taugrid)/(Btau(taugrid))**1.5, \
np.log(taugrid)))
if (not np.isfinite(taux) or taux > 1.E2): taux = 1.E2
if (not np.isfinite(tauy) or tauy > 1.E2): tauy = 1.E2
# Rest of the parameters
lambdax = np.sqrt( ( (np.cos(theta))**2*xstdev**2 \
- (np.sin(theta))**2*ystdev**2 ) \
/ ( (np.cos(theta))**2 - (np.sin(theta))**2 ) / Btau(taux) )
lambday = np.sqrt( ( (np.cos(theta))**2*ystdev**2 \
- (np.sin(theta))**2*xstdev**2 ) \
/ ( (np.cos(theta))**2 - (np.sin(theta))**2 ) / Btau(tauy) )
x0 = xmean - np.sqrt(2/np.pi) * ( np.cos(theta)*lambdax*(taux-1) \
- np.sin(theta)*lambday*(tauy-1) )
y0 = ymean - np.sqrt(2/np.pi) * ( np.sin(theta)*lambdax*(taux-1) \
+ np.cos(theta)*lambday*(tauy-1) )
# Draw the SUE
matrot = np.array([ [ np.cos(theta), -np.sin(theta) ], \
[ np.sin(theta), np.cos(theta) ] ])
xell_ax1 = np.zeros(2)
yell_ax1 = np.zeros(2)
xell_ax2 = np.zeros(2)
yell_ax2 = np.zeros(2)
for k in np.arange(4):
if (k == 0):
xell_sub = ramp(N=Npt,x0=-lambdax,x1=0) + x0
rx = 1-(xell_sub-x0)**2/lambdax**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = -lambday * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
elif (k == 1):
xell_sub = ramp(N=Npt,x0=0,x1=lambdax*taux) + x0
rx = 1-(xell_sub-x0)**2/lambdax**2/taux**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = -lambday * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
elif (k == 2):
xell_sub = (ramp(N=Npt,x0=0,x1=lambdax*taux))[::-1] + x0
rx = 1-(xell_sub-x0)**2/lambdax**2/taux**2
yell_sub = np.zeros(Npt)
yell_sub[rx >= 0] = lambday*tauy * np.sqrt(rx[rx >= 0]) + y0
yell_sub[rx < 0] = np.nan
elif (k == 3):
xell_sub = (ramp(N=Npt,x0=-lambdax,x1=0))[::-1] + x0
rx = 1-(xell_sub-x0)**2/lambdax**2
yell_sub = np.zeros(Npt)
""" PITCH MODULE TESTS
- Any advanced tests against the pitch analysis module methods will be contained here.
Advanced tests can vary, e.g. adding noise to the signal, harmonics, etc.
These are accuracy tests rather than unit tests, ensuring accuracy doesn't decrease
between builds.
"""
import unittest
from numpy import sin, pi, arange
from numpy.random import normal
from rtmaii.analysis import pitch
from rtmaii.analysis import spectral
def generate_sine(frequency, sampling_rate, time_step):
""" Generates a basic sine wave for testing. """
return sin(2 * pi * frequency * time_step / sampling_rate)
class TestSuite(unittest.TestCase):
""" Advanced Test Suite for the pitch module.
These go further than basic unit tests; they test the pitch methods
against a more complex signal.
However, including these as unit tests is useful,
as the accuracy shouldn't decrease in implementations.
So if these tests break, then the accuracy has reduced in a new iteration.
"""
def setUp(self):
""" Perform setup"""
self.sampling_rate = 44100 # standard sampling rate.
time_step = arange(self.sampling_rate)
# coding: utf8
"""
Convert the given set of 64x64 image files into a Dataset.
-> ndarray: (N, 64, 64, 3)
"""
import pickle
import sys
from glob import glob
from PIL import Image
import numpy as np
from began.config import BEGANConfig
def main(image_dir):
config = BEGANConfig()
convert_images_to_dataset(config, image_dir)
def convert_images_to_dataset(config: BEGANConfig, image_dir: list):
files = glob("%s/*.jpg" % image_dir) # Images must be resized to 64x64
N = len(files)
array_list = []
for i in range(N):
filename = files[i]
im = Image.open(filename) # type: Image.Image
im_array = np.array(im, dtype='uint8')
"""
Group-wise function alignment using SRSF framework and Dynamic Programming
moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import matplotlib.pyplot as plt
import fdasrsf.utility_functions as uf
import fdasrsf.bayesian_functions as bf
import fdasrsf.fPCA as fpca
import fdasrsf.geometry as geo
from scipy.integrate import trapz, cumtrapz
from scipy.interpolate import interp1d
from scipy.linalg import svd, cholesky
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform, pdist
import GPy
from numpy.linalg import norm, inv
from numpy.random import rand, normal
from joblib import Parallel, delayed
from fdasrsf.fPLS import pls_svd
from tqdm import tqdm
import fdasrsf.plot_style as plot
import fpls_warp as fpls
import collections
class fdawarp:
"""
This class provides alignment methods for functional data using the SRVF framework
Usage: obj = fdawarp(f,t)
:param f: (M,N): matrix defining N functions of M samples
:param time: time vector of length M
:param fn: aligned functions
:param qn: aligned srvfs
:param q0: initial srvfs
:param fmean: function mean
:param mqn: mean srvf
:param gam: warping functions
:param psi: srvf of warping functions
:param stats: alignment statistics
:param qun: cost function
:param lambda: lambda
:param method: optimization method
:param gamI: inverse warping function
:param rsamps: random samples
:param fs: random aligned functions
:param gams: random warping functions
:param ft: random warped functions
:param qs: random aligned srvfs
:param type: alignment type
:param mcmc: mcmc output if bayesian
Author : <NAME> (JDT) <jdtuck AT sandia.gov>
Date : 15-Mar-2018
"""
def __init__(self, f, time):
"""
Construct an instance of the fdawarp class
:param f: numpy ndarray of shape (M,N) of N functions with M samples
:param time: vector of size M describing the sample points
"""
a = time.shape[0]
if f.shape[0] != a:
raise Exception('Columns of f and time must be equal')
self.f = f
self.time = time
self.rsamps = False
def srsf_align(self, method="mean", omethod="DP2", center=True,
smoothdata=False, MaxItr=20, parallel=False, lam=0.0,
cores=-1, grid_dim=7):
"""
This function aligns a collection of functions using the elastic
square-root slope (srsf) framework.
:param method: (string) warp calculate Karcher Mean or Median
(options = "mean" or "median") (default="mean")
:param omethod: optimization method (DP, DP2, RBFGS) (default = DP2)
:param center: center warping functions (default = T)
:param smoothdata: Smooth the data using a box filter (default = F)
:param MaxItr: Maximum number of iterations (default = 20)
:param parallel: run in parallel (default = F)
:param lam: controls the elasticity (default = 0)
:param cores: number of cores for parallel (default = -1 (all))
:param grid_dim: size of the grid, for the DP2 method only (default = 7)
:type lam: double
:type smoothdata: bool
Examples
>>> import tables
>>> fun=tables.open_file("../Data/simu_data.h5")
>>> f = fun.root.f[:]
>>> f = f.transpose()
>>> time = fun.root.time[:]
>>> obj = fs.fdawarp(f,time)
>>> obj.srsf_align()
"""
M = self.f.shape[0]
N = self.f.shape[1]
self.lam = lam
if M > 500:
parallel = True
elif N > 100:
parallel = True
eps = np.finfo(np.double).eps
f0 = self.f
self.method = omethod
methods = ["mean", "median"]
self.type = method
# 0 mean, 1-median
method = [i for i, x in enumerate(methods) if x == method]
if len(method) == 0:
method = 0
else:
method = method[0]
# Compute SRSF function from data
f, g, g2 = uf.gradient_spline(self.time, self.f, smoothdata)
q = g / np.sqrt(abs(g) + eps)
print("Initializing...")
mnq = q.mean(axis=1)
a = mnq.repeat(N)
d1 = a.reshape(M, N)
d = (q - d1) ** 2
dqq = np.sqrt(d.sum(axis=0))
min_ind = dqq.argmin()
mq = q[:, min_ind]
mf = f[:, min_ind]
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq, self.time,
q[:, n], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
gam = np.zeros((M,N))
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq,self.time,q[:,k],omethod,lam,grid_dim)
gamI = uf.SqrtMeanInverse(gam)
mf = np.interp((self.time[-1] - self.time[0]) * gamI + self.time[0], self.time, mf)
mq = uf.f_to_srsf(mf, self.time)
# Compute Karcher Mean
if method == 0:
print("Compute Karcher Mean of %d function in SRSF space..." % N)
if method == 1:
print("Compute Karcher Median of %d function in SRSF space..." % N)
ds = np.repeat(0.0, MaxItr + 2)
ds[0] = np.inf
qun = np.repeat(0.0, MaxItr + 1)
tmp = np.zeros((M, MaxItr + 2))
tmp[:, 0] = mq
mq = tmp
tmp = np.zeros((M, MaxItr+2))
tmp[:,0] = mf
mf = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = self.f
f = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = q
q = tmp
for r in range(0, MaxItr):
print("updating step: r=%d" % (r + 1))
if r == (MaxItr - 1):
print("maximal number of iterations is reached")
# Matching Step
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq[:, r],
self.time, q[:, n, 0], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq[:, r], self.time, q[:, k, 0],
omethod, lam, grid_dim)
gam_dev = np.zeros((M, N))
vtil = np.zeros((M,N))
dtil = np.zeros(N)
for k in range(0, N):
f[:, k, r + 1] = np.interp((self.time[-1] - self.time[0]) * gam[:, k]
+ self.time[0], self.time, f[:, k, 0])
q[:, k, r + 1] = uf.f_to_srsf(f[:, k, r + 1], self.time)
gam_dev[:, k] = np.gradient(gam[:, k], 1 / float(M - 1))
v = q[:, k, r + 1] - mq[:,r]
d = np.sqrt(trapz(v*v, self.time))
vtil[:,k] = v/d
dtil[k] = 1.0/d
mqt = mq[:, r]
a = mqt.repeat(N)
d1 = a.reshape(M, N)
d = (q[:, :, r + 1] - d1) ** 2
if method == 0:
d1 = sum(trapz(d, self.time, axis=0))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, self.time, axis=0))
ds_tmp = d1 + lam * d2
ds[r + 1] = ds_tmp
# Minimization Step
# compute the mean of the matched function
qtemp = q[:, :, r + 1]
ftemp = f[:, :, r + 1]
mq[:, r + 1] = qtemp.mean(axis=1)
mf[:, r + 1] = ftemp.mean(axis=1)
qun[r] = norm(mq[:, r + 1] - mq[:, r]) / norm(mq[:, r])
if method == 1:
d1 = np.sqrt(sum(trapz(d, self.time, axis=0)))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, self.time, axis=0))
ds_tmp = d1 + lam * d2
ds[r + 1] = ds_tmp
# Minimization Step
# compute the mean of the matched function
stp = .3
vbar = vtil.sum(axis=1)*(1/dtil.sum())
qtemp = q[:, :, r + 1]
ftemp = f[:, :, r + 1]
mq[:, r + 1] = mq[:,r] + stp*vbar
tmp = np.zeros(M)
tmp[1:] = cumtrapz(mq[:, r + 1] * np.abs(mq[:, r + 1]), self.time)
mf[:, r + 1] = np.median(f0[1, :])+tmp
qun[r] = norm(mq[:, r + 1] - mq[:, r]) / norm(mq[:, r])
if qun[r] < 1e-2 or r >= MaxItr:
break
# Last Step with centering of gam
if center:
r += 1
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq[:, r], self.time,
q[:, n, 0], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq[:, r], self.time, q[:, k, 0], omethod,
lam, grid_dim)
gam_dev = np.zeros((M, N))
for k in range(0, N):
gam_dev[:, k] = np.gradient(gam[:, k], 1 / float(M - 1))
gamI = uf.SqrtMeanInverse(gam)
gamI_dev = np.gradient(gamI, 1 / float(M - 1))
time0 = (self.time[-1] - self.time[0]) * gamI + self.time[0]
mq[:, r + 1] = np.interp(time0, self.time, mq[:, r]) * np.sqrt(gamI_dev)
for k in range(0, N):
q[:, k, r + 1] = np.interp(time0, self.time, q[:, k, r]) * np.sqrt(gamI_dev)
f[:, k, r + 1] = np.interp(time0, self.time, f[:, k, r])
gam[:, k] = np.interp(time0, self.time, gam[:, k])
else:
gamI = uf.SqrtMeanInverse(gam)
gamI_dev = np.gradient(gamI, 1 / float(M - 1))
# Aligned data & stats
self.fn = f[:, :, r + 1]
self.qn = q[:, :, r + 1]
self.q0 = q[:, :, 0]
mean_f0 = f0.mean(axis=1)
std_f0 = f0.std(axis=1)
mean_fn = self.fn.mean(axis=1)
std_fn = self.fn.std(axis=1)
self.gam = gam
self.mqn = mq[:, r + 1]
tmp = np.zeros(M)
tmp[1:] = cumtrapz(self.mqn * np.abs(self.mqn), self.time)
self.fmean = np.mean(f0[1, :]) + tmp
fgam = np.zeros((M, N))
for k in range(0, N):
time0 = (self.time[-1] - self.time[0]) * gam[:, k] + self.time[0]
fgam[:, k] = np.interp(time0, self.time, self.fmean)
var_fgam = fgam.var(axis=1)
self.orig_var = trapz(std_f0 ** 2, self.time)
self.amp_var = trapz(std_fn ** 2, self.time)
self.phase_var = trapz(var_fgam, self.time)
return
def plot(self):
"""
plot plot functional alignment results
Usage: obj.plot()
"""
M = self.f.shape[0]
plot.f_plot(self.time, self.f, title="f Original Data")
fig, ax = plot.f_plot(np.arange(0, M) / float(M - 1), self.gam,
title="Warping Functions")
ax.set_aspect('equal')
plot.f_plot(self.time, self.fn, title="Warped Data")
mean_f0 = self.f.mean(axis=1)
std_f0 = self.f.std(axis=1)
mean_fn = self.fn.mean(axis=1)
std_fn = self.fn.std(axis=1)
tmp = np.array([mean_f0, mean_f0 + std_f0, mean_f0 - std_f0])
tmp = tmp.transpose()
plot.f_plot(self.time, tmp, title=r"Original Data: Mean $\pm$ STD")
tmp = np.array([mean_fn, mean_fn + std_fn, mean_fn - std_fn])
tmp = tmp.transpose()
plot.f_plot(self.time, tmp, title=r"Warped Data: Mean $\pm$ STD")
plot.f_plot(self.time, self.fmean, title="$f_{mean}$")
plt.show()
return
def gauss_model(self, n=1, sort_samples=False):
"""
This function models the functional data using a Gaussian model
extracted from the principal components of the srvfs
:param n: number of random samples
:param sort_samples: sort samples (default = T)
:type n: integer
:type sort_samples: bool
"""
fn = self.fn
time = self.time
qn = self.qn
gam = self.gam
# Parameters
eps = np.finfo(np.double).eps
binsize = np.diff(time)
binsize = binsize.mean()
M = time.size
# compute mean and covariance in q-domain
mq_new = qn.mean(axis=1)
mididx = np.round(time.shape[0] / 2)
m_new = np.sign(fn[mididx, :]) * np.sqrt(np.abs(fn[mididx, :]))
mqn = np.append(mq_new, m_new.mean())
qn2 = np.vstack((qn, m_new))
C = np.cov(qn2)
q_s = np.random.multivariate_normal(mqn, C, n)
q_s = q_s.transpose()
# compute the correspondence to the original function domain
fs = np.zeros((M, n))
for k in range(0, n):
fs[:, k] = uf.cumtrapzmid(time, q_s[0:M, k] * np.abs(q_s[0:M, k]),
np.sign(q_s[M, k]) * (q_s[M, k] ** 2),
mididx)
fbar = fn.mean(axis=1)
fsbar = fs.mean(axis=1)
err = np.transpose(np.tile(fbar-fsbar, (n,1)))
fs += err
# random warping generation
rgam = uf.randomGamma(gam, n)
gams = np.zeros((M, n))
for k in range(0, n):
gams[:, k] = uf.invertGamma(rgam[:, k])
# sort functions and warping
if sort_samples:
mx = fs.max(axis=0)
seq1 = mx.argsort()
# compute the psi-function
fy = np.gradient(rgam, binsize)
psi = fy / np.sqrt(abs(fy) + eps)
ip = np.zeros(n)
len = np.zeros(n)
for i in range(0, n):
tmp = np.ones(M)
ip[i] = tmp.dot(psi[:, i] / M)
len[i] = np.arccos(tmp.dot(psi[:, i] / M))
seq2 = len.argsort()
# combine x-variability and y-variability
ft = np.zeros((M, n))
for k in range(0, n):
ft[:, k] = np.interp(gams[:, seq2[k]], np.arange(0, M) /
np.double(M - 1), fs[:, seq1[k]])
tmp = np.isnan(ft[:, k])
while tmp.any():
rgam2 = uf.randomGamma(gam, 1)
ft[:, k] = np.interp(gams[:, seq2[k]], np.arange(0, M) /
np.double(M - 1), uf.invertGamma(rgam2))
else:
# combine x-variability and y-variability
ft = np.zeros((M, n))
for k in range(0, n):
ft[:, k] = np.interp(gams[:, k], np.arange(0, M) /
np.double(M - 1), fs[:, k])
tmp = np.isnan(ft[:, k])
while tmp.any():
rgam2 = uf.randomGamma(gam, 1)
ft[:, k] = np.interp(gams[:, k], np.arange(0, M) /
np.double(M - 1), uf.invertGamma(rgam2))
self.rsamps = True
self.fs = fs
self.gams = rgam
self.ft = ft
self.qs = q_s[0:M,:]
return
def joint_gauss_model(self, n=1, no=3):
"""
This function models the functional data using a joint Gaussian model
extracted from the principal components of the srsfs
:param n: number of random samples
:param no: number of principal components (default = 3)
:type n: integer
:type no: integer
"""
# Parameters
fn = self.fn
time = self.time
qn = self.qn
gam = self.gam
M = time.size
# Perform PCA
jfpca = fpca.fdajpca(self)
jfpca.calc_fpca(no=no)
s = jfpca.latent
U = jfpca.U
C = jfpca.C
mu_psi = jfpca.mu_psi
# compute mean and covariance
mq_new = qn.mean(axis=1)
mididx = jfpca.id
m_new = np.sign(fn[mididx, :]) * np.sqrt(np.abs(fn[mididx, :]))
mqn = np.append(mq_new, m_new.mean())
# generate random samples
vals = np.random.multivariate_normal(np.zeros(s.shape), np.diag(s), n)
tmp = np.matmul(U, np.transpose(vals))
qhat = np.tile(mqn.T,(n,1)).T + tmp[0:M+1,:]
tmp = np.matmul(U, np.transpose(vals)/C)
vechat = tmp[(M+1):,:]
psihat = np.zeros((M,n))
gamhat = np.zeros((M,n))
for ii in range(n):
psihat[:,ii] = geo.exp_map(mu_psi,vechat[:,ii])
gam_tmp = cumtrapz(psihat[:,ii]**2,np.linspace(0,1,M),initial=0.0)
gamhat[:,ii] = (gam_tmp - gam_tmp.min())/(gam_tmp.max()-gam_tmp.min())
ft = np.zeros((M,n))
fhat = np.zeros((M,n))
for ii in range(n):
fhat[:,ii] = uf.cumtrapzmid(time, qhat[0:M,ii]*np.fabs(qhat[0:M,ii]), np.sign(qhat[M,ii])*(qhat[M,ii]*qhat[M,ii]), mididx)
ft[:,ii] = uf.warp_f_gamma(np.linspace(0,1,M),fhat[:,ii],gamhat[:,ii])
self.rsamps = True
self.fs = fhat
self.gams = gamhat
self.ft = ft
self.qs = qhat[0:M,:]
return
def multiple_align_functions(self, mu, omethod="DP2", smoothdata=False,
parallel=False, lam=0.0, cores=-1, grid_dim=7):
"""
This function aligns a collection of functions using the elastic square-root
slope (srsf) framework.
Usage: obj.multiple_align_functions(mu)
obj.multiple_align_functions(lambda)
obj.multiple_align_functions(lambda, ...)
:param mu: vector of function to align to
:param omethod: optimization method (DP, DP2, RBFGS) (default = DP)
:param smoothdata: Smooth the data using a box filter (default = F)
:param parallel: run in parallel (default = F)
:param lam: controls the elasticity (default = 0)
:param cores: number of cores for parallel (default = -1 (all))
:param grid_dim: size of the grid, for the DP2 method only (default = 7)
:type lam: double
:type smoothdata: bool
"""
M = self.f.shape[0]
N = self.f.shape[1]
self.lam = lam
if M > 500:
parallel = True
elif N > 100:
parallel = True
eps = np.finfo(np.double).eps
self.method = omethod
self.type = "multiple"
# Compute SRSF function from data
f, g, g2 = uf.gradient_spline(self.time, self.f, smoothdata)
q = g / np.sqrt(abs(g) + eps)
mq = uf.f_to_srsf(mu, self.time)
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq, self.time,
q[:, n], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
gam = np.zeros((M,N))
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq,self.time,q[:,k],omethod,lam,grid_dim)
self.gamI = uf.SqrtMeanInverse(gam)
fn = np.zeros((M,N))
qn = np.zeros((M,N))
for k in range(0, N):
fn[:, k] = np.interp((self.time[-1] - self.time[0]) * gam[:, k]
+ self.time[0], self.time, f[:, k])
qn[:, k] = uf.f_to_srsf(f[:, k], self.time)
# Aligned data & stats
self.fn = fn
self.qn = qn
self.q0 = q
mean_f0 = f.mean(axis=1)
std_f0 = f.std(axis=1)
mean_fn = self.fn.mean(axis=1)
std_fn = self.fn.std(axis=1)
self.gam = gam
self.mqn = mq
self.fmean = mu
fgam = np.zeros((M, N))
for k in range(0, N):
time0 = (self.time[-1] - self.time[0]) * gam[:, k] + self.time[0]
fgam[:, k] = np.interp(time0, self.time, self.fmean)
var_fgam = fgam.var(axis=1)
self.orig_var = trapz(std_f0 ** 2, self.time)
self.amp_var = trapz(std_fn ** 2, self.time)
self.phase_var = trapz(var_fgam, self.time)
return
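# Minimal usage sketch for fdawarp.srsf_align() on two synthetic functions
# that differ only by a time warp; the function shapes are illustrative only.
def _example_fdawarp_align():
    time = np.linspace(0, 1, 101)
    f = np.column_stack((np.sin(2 * np.pi * time ** 1.3),
                         np.sin(2 * np.pi * time)))
    obj = fdawarp(f, time)
    obj.srsf_align(parallel=False)
    return obj.gam  # estimated warping functions, shape (101, 2)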
def pairwise_align_functions(f1, f2, time, omethod="DP2", lam=0, grid_dim=7):
"""
This function aligns f2 to f1 using the elastic square-root
slope (srsf) framework.
Usage: out = pairwise_align_functions(f1, f2, time)
out = pairwise_align_functions(f1, f2, time, omethod, lam, grid_dim)
:param f1: vector defining M samples of function 1
:param f2: vector defining M samples of function 2
:param time: time vector of length M
:param omethod: optimization method (DP, DP2, RBFGS) (default = DP)
:param lam: controls the elasticity (default = 0)
:param grid_dim: size of the grid, for the DP2 method only (default = 7)
:rtype list containing
:return f2n: aligned f2
:return gam: warping function
:return q2n: aligned q2 (srsf)
"""
q1 = uf.f_to_srsf(f1, time)
q2 = uf.f_to_srsf(f2, time)
gam = uf.optimum_reparam(q1, time, q2, omethod, lam, grid_dim)
f2n = uf.warp_f_gamma(time, f2 , gam)
q2n = uf.f_to_srsf(f2n, time)
return (f2n, gam, q2n)
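# Minimal usage sketch for pairwise_align_functions() on two synthetic
# functions; the warp applied to f2 is illustrative only.
def _example_pairwise_align():
    time = np.linspace(0, 1, 101)
    f1 = np.sin(2 * np.pi * time)
    f2 = np.sin(2 * np.pi * time ** 1.5)
    f2n, gam, q2n = pairwise_align_functions(f1, f2, time)
    return f2n, gam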
def pairwise_align_bayes(f1i, f2i, time, mcmcopts=None):
"""
This function aligns two functions using Bayesian framework. It will align
f2 to f1. It is based on mapping warping functions to a hypersphere, and a
subsequent exponential mapping to a tangent space. In the tangent space,
the Z-mixture pCN algorithm is used to explore both local and global
structure in the posterior distribution.
The Z-mixture pCN algorithm uses a mixture distribution for the proposal
distribution, controlled by input parameter zpcn. The zpcn$betas must be
between 0 and 1, and are the coefficients of the mixture components, with
larger coefficients corresponding to larger shifts in parameter space. The
zpcn["probs"] give the probability of each shift size.
Usage: out = pairwise_align_bayes(f1i, f2i, time)
out = pairwise_align_bayes(f1i, f2i, time, mcmcopts)
:param f1i: vector defining M samples of function 1
:param f2i: vector defining M samples of function 2
:param time: time vector of length M
:param mcmopts: dict of mcmc parameters
:type mcmcopts: dict
default mcmc options:
tmp = {"betas":np.array([0.5,0.5,0.005,0.0001]),"probs":np.array([0.1,0.1,0.7,0.1])}
mcmcopts = {"iter":2*(10**4) ,"burnin":np.minimum(5*(10**3),2*(10**4)//2),
"alpha0":0.1, "beta0":0.1,"zpcn":tmp,"propvar":1,
"initcoef":np.repeat(0,20), "npoints":200, "extrainfo":True}
:rtype collection containing
:return f2_warped: aligned f2
:return gamma: warping function
:return g_coef: final g_coef
:return psi: final psi
:return sigma1: final sigma
if extrainfo
:return accept: accept of psi samples
:return betas_ind
:return logl: log likelihood
:return gamma_mat: posterior gammas
:return gamma_stats: posterior gamma stats
:return xdist: phase distance posterior
:return ydist: amplitude distance posterior)
"""
if mcmcopts is None:
tmp = {"betas":np.array([0.5,0.5,0.005,0.0001]),"probs":np.array([0.1,0.1,0.7,0.1])}
mcmcopts = {"iter":2*(10**4) ,"burnin":np.minimum(5*(10**3),2*(10**4)//2),"alpha0":0.1,
"beta0":0.1,"zpcn":tmp,"propvar":1,
"initcoef": | np.repeat(0,20) | numpy.repeat |
import unittest
import torch
from skimage.future import graph
from skimage.segmentation import slic
import numpy as np
import networkx as nx
from utils import triangle_to_edge_matrix, edge_to_node_matrix, tensor_to_sparse
from constants import TEST_CIFAR10_IMAGE_1, TEST_MNIST_IMAGE_1, TEST_MNIST_IMAGE_2
from Superpixel.ImageProcessor import ImageProcessor
from Superpixel.EdgeFlow import PixelBasedEdgeFlow
from skimage import color
from constants import DEVICE
class MyTestCase(unittest.TestCase):
def test_generating_utils_works_correctly(self):
# This test is a toy example based on Control Using Higher Order Laplacians in Network Topologies (2006)
# by <NAME>, <NAME>
nodes = [1, 2, 3, 4, 5]
edges = [(1, 2), (1, 3), (2, 3), (2, 4), (3, 4), (3, 5), (4, 5)]
triangles = [(2, 3, 4)]
b1 = edge_to_node_matrix(edges, nodes)
b2 = triangle_to_edge_matrix(triangles, edges)
L0_actual = [[2, -1, -1, 0, 0],
[-1, 3, -1, -1, 0],
[-1, -1, 4, -1, -1],
[0, -1, -1, 3, -1],
[0, 0, -1, -1, 2]]
L0_actual = torch.tensor(L0_actual, dtype=torch.float, device=DEVICE)
L1_actual = [[2, 1, -1, -1, 0, 0, 0],
[1, 2, 1, 0, -1, -1, 0],
[-1, 1, 3, 0, 0, -1, 0],
[-1, 0, 0, 3, 0, 0, -1],
[0, -1, 0, 0, 3, 1, -1],
[0, -1, -1, 0, 1, 2, 1],
[0, 0, 0, -1, -1, 1, 2]]
L1_actual = torch.tensor(L1_actual, dtype=torch.float, device=DEVICE)
L2_actual = torch.tensor([[3]], dtype=torch.float, device=DEVICE)
L0 = torch.matmul(b1, b1.t())
L1 = torch.matmul(b1.t(), b1) + torch.matmul(b2, b2.t())
L2 = torch.matmul(b2.t(), b2)
self.assertTrue(torch.all(torch.eq(L0, L0_actual)).item())
self.assertTrue(torch.all(torch.eq(L1, L1_actual)).item())
self.assertTrue(torch.all(torch.eq(L2, L2_actual)).item())
def test_sparse_mm_yields_same_result_as_dense_mm(self):
image = TEST_MNIST_IMAGE_1
image = np.array(image)
superpixel = slic(image, n_segments=100, compactness=0.75, start_label=1)
rag = graph.rag_mean_color(image, superpixel)
b1 = edge_to_node_matrix(rag.edges(), rag.nodes)
L0 = torch.matmul(b1, b1.T)
b1_coo = b1.to_sparse()
L0_coo = torch.sparse.mm(b1_coo, b1_coo.t())
L0_coo = L0_coo.to_dense()
self.assertTrue(torch.all(torch.eq(L0, L0_coo)).item())
def test_sparse_add_yields_same_result_as_dense_add(self):
image = TEST_MNIST_IMAGE_2
image = np.array(image)
superpixel = slic(image, n_segments=150, compactness=0.75, start_label=1)
rag = graph.rag_mean_color(image, superpixel)
triangles = [*filter(lambda x: len(x) == 3, nx.enumerate_all_cliques(rag))]
b1 = edge_to_node_matrix(rag.edges(), rag.nodes)
b2 = triangle_to_edge_matrix(triangles, rag.edges)
b1_coo, b2_coo = b1.to_sparse(), b2.to_sparse()
L1 = torch.matmul(b1.T, b1) + torch.matmul(b2, b2.T)
L1_coo = torch.sparse.FloatTensor.add(torch.sparse.mm(b1_coo.t(), b1_coo),
torch.sparse.mm(b2_coo, b2_coo.t()))
L1_coo = L1_coo.to_dense()
self.assertTrue(torch.all(torch.eq(L1, L1_coo)).item())
    def test_Laplacian_0_generated_correctly(self):
image = TEST_MNIST_IMAGE_2
        image = np.array(image)
# AUTOGENERATED! DO NOT EDIT! File to edit: 03_shape.ipynb (unless otherwise specified).
__all__ = ['getElemetType', 'tria_scheme', 'tetra_scheme', 'getGaussPoints', 'getShapeLine2', 'getShapeLine3',
'getShapeTria3', 'getShapeTria6', 'getShapeQuad4', 'getShapeQuad8', 'getShapeQuad9', 'getShapeTetra4',
'getShapeTetra10', 'getShapeHexa8', 'getShapeHexa20', 'getAllShapeFunctions']
# Cell
import numpy as np
from scipy.special.orthogonal import p_roots as gauss_scheme
np.set_printoptions(precision=4)
# Cell
def getElemetType(elemCoords):
"Determine the element type"
dict = {
"numDim_1": {
"numNodes_2": "Line2",
"numNodes_3": "Line3"
},
"numDim_2": {
"numNodes_3": "Tria3",
"numNodes_4": "Quad4",
"numNodes_6": "Tria6",
"numNodes_8": "Quad8",
"numNodes_9": "Quad9",
},
"numDim_3": {
"numNodes_4": "Tetra4",
"numNodes_8": "Hexa8",
"numNodes_10": "Tetra10",
"numNodes_20": "Hexa20"
},
}
try:
numNodes = elemCoords.shape[0]
        numDim = elemCoords.shape[1] if elemCoords.ndim > 1 else 1
ElemType = dict.get(f"numDim_{numDim}").get(f"numNodes_{numNodes}")
if ElemType:
return ElemType
else:
raise NotImplementedError(
f"No {numDim}D element with {numNodes} nodes is available"
)
except NotImplementedError as error:
print(error)
except IndexError:
print("No valid coordinates array")
except AttributeError:
print("No valid coordinates array")
except TypeError:
print("No valid coordinates array")
# Cell
def tria_scheme(order):
    if order == 1:
xi = [[1./3., 1./3.]]
weight = [[1.]]
    elif order == 3:
r1 = 1./6.
r2 = 2./3.
w1 = 1./3.
xi = [[r1,r1],[r2,r1],[r1,r2]]
weight = [[w1],[w1],[w1]]
    elif order == 4:
r1 = 1./5.
r2 = 3./5.
r3 = 1./3.
w1 = 0.52083333
w2 = 0.52083333
w3 = 0.52083333
w4 = -0.56250000
xi = [[r1,r1],[r2,r1],[r1,r2],[r3,r3]]
weight = [[w1],[w2],[w3],[w4]]
return xi, weight
# Cell
def tetra_scheme(order):
    if order == 1:
xi = [[1./4., 1./4., 1./4.]]
weight = [[1.]]
    elif order == 4:
r1 = 0.5854102
r2 = 0.1381966
w1 = 1./4.
xi = [[r1,r2,r2],[r2,r1,r2],[r2,r2,r1],[r2,r2,r2]]
weight = [[w1],[w1],[w1],[w1]]
    elif order == 5:
r1 = 1./4.
r2 = 1./2.
r3 = 1./6.
w1 = 9./20.
w2 = -4./5.
xi = [[r2,r3,r3],[r3,r2,r3],[r3,r3,r2],[r3,r3,r3],[r1,r1,r1]]
weight = [[w1],[w1],[w1],[w1],[w2]]
return xi, weight
# Cell
def getGaussPoints(elemType, reduced=False):
point = []
weight = []
if "Line" in elemType:
stdOrder = 2 if "2" in elemType else 3
if reduced: stdOrder -= 1
ip, w = gauss_scheme(stdOrder)
point = [[ip[i]] for i in range(stdOrder)]
weight = [[w[i]] for i in range(stdOrder)]
elif "Tria" in elemType:
stdOrder = 1 if "3" in elemType else 4
if stdOrder == 4 and reduced: stdOrder = 3
point, weight = tria_scheme(stdOrder)
elif "Quad" in elemType:
stdOrder = 2 if "4" in elemType else 3
if reduced: stdOrder -= 1
ip, w = gauss_scheme(stdOrder)
point = [[ip[j], ip[i]] for i in range(stdOrder) for j in range(stdOrder)]
weight = [[w[j]*w[i]] for i in range(stdOrder) for j in range(stdOrder)]
elif "Tetra" in elemType:
stdOrder = 1 if "4" in elemType else 5
if stdOrder == 5 and reduced: stdOrder = 4
point, weight = tetra_scheme(stdOrder)
elif "Hexa" in elemType:
stdOrder = 2 if "8" in elemType else 3
if reduced: stdOrder -= 1
ip, w = gauss_scheme(stdOrder)
point = [[ip[k], ip[j], ip[i]] for i in range(stdOrder) for j in range(stdOrder) for k in range(stdOrder)]
weight = [[w[k]*w[j]*w[i]] for i in range(stdOrder) for j in range(stdOrder) for k in range(stdOrder)]
return np.array(point), np.array(weight)
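# Sanity sketch for getGaussPoints (illustrative assumption: the weights of a
# full rule sum to the measure of the reference element, e.g. 4.0 for the
# bi-unit quadrilateral [-1, 1] x [-1, 1]).
def _gauss_points_example():
    points, weights = getGaussPoints("Quad4")
    assert points.shape == (4, 2)
    assert np.isclose(weights.sum(), 4.0)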
# Cell
def getShapeLine2(gaussPoint):
# Check the dimension of physical space
if gaussPoint.shape[0] != 1:
raise NotImplementedError("1D only")
############################################################################################################
# gauss points coords
xi = gaussPoint[0]
############################################################################################################
# Tuple with xi_a combinatory
xi_comb = [-1,1]
############################################################################################################
# Calculate shape functions
N = np.array([0.5*(1+sign*xi) for sign in xi_comb])
############################################################################################################
# Calculate derivatives of shape functions-> xi
dN = np.array([0.5*sign for sign in xi_comb])
return N, dN
# Cell
def getShapeLine3(gaussPoint):
# Check the dimension of physical space
if gaussPoint.shape[0] != 1:
raise NotImplementedError("1D only")
############################################################################################################
# gauss points coords
xi = gaussPoint[0]
############################################################################################################
# Tuple with xi_a combinatory
xi_comb = [-1,1]
############################################################################################################
# Calculate shape functions
N_lateral = np.array([0.5*item*xi*(1+item*xi) for item in xi_comb])
N_middle = np.array([(1+xi)*(1-xi)])
N = np.hstack((N_lateral[0], N_middle, N_lateral[1]))
############################################################################################################
# Calculate derivatives of shape functions -> xi
dN_lateral = np.array([0.5*item*(1+2.*item*xi) for item in xi_comb])
dN_middle = np.array([-2.*xi])
dN = np.hstack((dN_lateral[0], dN_middle, dN_lateral[1]))
return N, dN
# Cell
def getShapeTria3(gaussPoint):
# Check the dimension of physical space
if gaussPoint.shape[0] != 2:
raise NotImplementedError("2D only")
############################################################################################################
# gauss points coords
L1 = gaussPoint[0]
L2 = gaussPoint[1]
L3 = 1-L1-L2
############################################################################################################
# Calculate shape functions
N = np.array([L1, L2, L3])
############################################################################################################
# Calculate derivatives of shape functions-> xi
dN_dxi = np.array([1., 0., -1.])
# Calculate derivatives of shape functions-> eta
dN_deta = np.array([0., 1., -1.])
# Calculate derivatives of shape functions
dN = np.vstack((dN_dxi, dN_deta))
return N, dN
# Cell
def getShapeTria6(gaussPoint):
# Check the dimension of physical space
if gaussPoint.shape[0] != 2:
raise NotImplementedError("2D only")
############################################################################################################
# gauss points coords
L1 = gaussPoint[0]
L2 = gaussPoint[1]
L3 = 1-gaussPoint[0]-gaussPoint[1]
############################################################################################################
# Calculate shape functions
N = np.array([L1*(2.*L1-1.), L2*(2.*L2-1.), L3*(2.*L3-1.), 4*L1*L2, 4*L2*L3, 4*L1*L3])
############################################################################################################
# Calculate derivatives of shape functions-> xi
dN_dxi = np.array([4.*L1-1, 0., -4.*L3+1, 4.*L2, -4.*L2, 4.*(L3-L1)])
# Calculate derivatives of shape functions-> eta
dN_deta = np.array([0., 4.*L2-1, -4.*L3+1, 4.*L1, 4.*(L3-L2), -4.*L1])
# Calculate derivatives of shape functions
dN = np.vstack((dN_dxi, dN_deta))
return N, dN
# Cell
def getShapeQuad4(gaussPoint):
# Check the dimension of physical space
if gaussPoint.shape[0] != 2:
raise NotImplementedError("2D only")
############################################################################################################
# gauss points coords
xi = gaussPoint[0]
eta = gaussPoint[1]
############################################################################################################
# Tuple with xi_a and eta_a combinatory
xi_eta_comb = [(-1,-1),(1,-1),(1,1),(-1,1)]
############################################################################################################
# Calculate shape functions
N = np.array([0.25*(1.0+sign[0]*xi)*(1.0+sign[1]*eta) for sign in xi_eta_comb])
############################################################################################################
# Calculate derivatives of shape functions-> xi
dN_dxi = np.array([0.25*sign[0]*(1+sign[1]*eta) for sign in xi_eta_comb])
# Calculate derivatives of shape functions-> eta
dN_deta = np.array([0.25*sign[1]*(1+sign[0]*xi) for sign in xi_eta_comb])
# Calculate derivatives of shape functions
dN = np.vstack((dN_dxi, dN_deta))
return N, dN
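# Partition-of-unity sketch for the bilinear quad (illustrative helper): at
# any point of the reference square the four shape functions sum to one and
# each row of derivatives sums to zero.
def _quad4_partition_of_unity_example():
    N, dN = getShapeQuad4(np.array([0.3, -0.2]))
    assert np.isclose(N.sum(), 1.0)
    assert np.allclose(dN.sum(axis=1), 0.0)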
# Cell
def getShapeQuad8(gaussPoint):
# Check the dimension of physical space
if gaussPoint.shape[0] != 2:
raise NotImplementedError("2D only")
############################################################################################################
# gauss points coords
xi = gaussPoint[0]
eta = gaussPoint[1]
############################################################################################################
# Tuple with xi_a and eta_a combinatory
xi_eta_comb = [(-1,-1),(1,-1),(1,1),(-1,1)]
############################################################################################################
# Calculate shape functions
# Nodes -> 1,2,3,4
N_lateral = np.array([0.25*(1+sign[0]*xi)*(1+sign[1]*eta)*(sign[0]*xi+sign[1]*eta-1) for sign in xi_eta_comb])
# Nodes -> 5,7
N_middle_xi = np.array([0.5*(1-xi**2)*(1+sign*eta) for sign in [-1,1]])
# Nodes -> 6,8
N_middle_eta = np.array([0.5*(1-eta**2)*(1+sign*xi) for sign in [1,-1]])
# Nodes -> ALL
N = np.hstack((N_lateral, N_middle_xi[0], N_middle_eta[0], N_middle_xi[1], N_middle_eta[1]))
############################################################################################################
# Calculate derivatives of shape functions -> xi
# Nodes -> 1,2,3,4
dN_dxi_lateral = np.array([0.25*sign[0]*(1+sign[1]*eta)*(2*sign[0]*xi+sign[1]*eta) for sign in xi_eta_comb])
# Nodes -> 5,7
dN_dxi_middle_xi = np.array([0.5*(1+sign*eta)*(-2.*xi) for sign in [-1,1]])
# Nodes -> 6,8
dN_dxi_middle_eta = np.array([0.5*(1-eta**2)*sign for sign in [1,-1]])
# Nodes -> ALL
dN_dxi = np.hstack((dN_dxi_lateral, dN_dxi_middle_xi[0], dN_dxi_middle_eta[0], dN_dxi_middle_xi[1], dN_dxi_middle_eta[1]))
############################################################################################################
# Calculate derivatives of shape functions -> eta
# Nodes -> 1,2,3,4
dN_deta_lateral = np.array([0.25*sign[1]*(1+sign[0]*xi)*(2*sign[1]*eta+sign[0]*xi) for sign in xi_eta_comb])
# Nodes -> 5,7
dN_deta_middle_xi = np.array([0.5*(1-xi**2)*sign for sign in [-1,1]])
# Nodes -> 6,8
dN_deta_middle_eta = np.array([0.5*(1+sign*xi)*(-2.*eta) for sign in [1,-1]])
# Nodes -> ALL
dN_deta = np.hstack((dN_deta_lateral, dN_deta_middle_xi[0], dN_deta_middle_eta[0],
dN_deta_middle_xi[1], dN_deta_middle_eta[1]))
############################################################################################################
# Calculate derivatives of shape functions
dN = np.vstack((dN_dxi, dN_deta))
return N, dN
# Cell
def getShapeQuad9(gaussPoint):
# Check the dimension of physical space
if gaussPoint.shape[0] != 2:
raise NotImplementedError("2D only")
############################################################################################################
# gauss points coords
xi = gaussPoint[0]
eta = gaussPoint[1]
############################################################################################################
# Tuple with xi_a and eta_a combinatory
xi_eta_comb = [(-1,-1),(1,-1),(1,1),(-1,1)]
############################################################################################################
# Calculate shape functions
# Nodes -> 1,2,3,4
    N_lateral = np.array([0.25*xi*eta*(xi+sign[0])*(eta+sign[1]) for sign in xi_eta_comb])
import os
import numpy as np
from PIL import Image
from itertools import product
from collections import defaultdict
from copy import deepcopy
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import colors
import pandas as pd
import torch
import cv2
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.cluster import DBSCAN
import matplotlib.patches as patches
from sklearn.neighbors import NearestNeighbors
class head():
def __init__(self, left_eye=None,right_eye=None, distance=400):
self.l = left_eye
self.r = right_eye
self.distance = distance
def toVect(self, landmark):
out = np.array([[landmark[0][self.l]], [landmark[1][self.r]] ])
return out
def clusterHead(left_eyes, right_eyes, fullHeads=False):
#We use NN to cluster head objects: eyes and nose, assuming there is at least one pair of eyes
if not left_eyes or not right_eyes :
heads = {}
if fullHeads:
for headsita in list(range(len(left_eyes))):
newHead = head(left_eye = headsita)
heads[headsita] = newHead
for headsita in list(range(len(right_eyes))):
newHead = head(right_eye = headsita)
heads[headsita] = newHead
elif len(left_eyes)>1:
neigh = NearestNeighbors(n_neighbors=2)
neigh.fit(left_eyes)
distances, from_right_to_left =neigh.kneighbors(right_eyes)
index_taken = {} #[inr, distances[inr][0]]
queue = list(range(len(right_eyes)))
heads = {}
j = -1
# we examine the terms and correct previous choices
while queue:
index_right_eye = queue[0]
queue = queue[1:]
# we grab the closest left eye to the inr
index_left_eye = from_right_to_left[index_right_eye][0]
if (index_left_eye)==[] and fullHeads:
# if the point is asolated
newHead = head( right_eye=index_right_eye)
heads[j] = newHead
j = j-1
elif index_left_eye not in index_taken:
#new index
newHead = head(left_eye = index_left_eye, right_eye=index_right_eye, distance = distances[index_right_eye][0])
heads[index_left_eye] = newHead
index_taken[index_left_eye] = [index_right_eye, distances[index_right_eye][0]]
else:
# we need to compare distances
newdist = distances[index_right_eye][0]
olddist = index_taken[index_left_eye][1]
if olddist<newdist:
# wrong left eye
index_left_eye = from_right_to_left[index_right_eye][1]
newdist = distances[index_right_eye][1]
olddist = index_taken.get(index_left_eye, [[],None])[1]
if index_left_eye not in index_taken:
newHead = head(left_eye = index_left_eye, right_eye=index_right_eye, distance = distances[index_right_eye][1])
heads[index_left_eye] = newHead
index_taken[index_left_eye] = [index_right_eye, distances[index_right_eye][1]]
elif olddist < newdist and fullHeads: # olddist<newdist
newHead = head( right_eye=index_right_eye)
heads[j] = newHead
j = j-1
else:
queue = queue+[index_taken[index_left_eye][0]]
newHead = head(left_eye = index_left_eye, right_eye=index_right_eye, distance = newdist)
heads[index_left_eye] = newHead
index_taken[index_left_eye] = [index_right_eye, distances[index_right_eye][1]]
else:
# correct left eye already taken
queue = queue+[index_taken[index_left_eye][0]]
newHead = head(left_eye = index_left_eye, right_eye=index_right_eye, distance = newdist)
heads[index_left_eye] = newHead
index_taken[index_left_eye] = [index_right_eye, newdist]
if fullHeads:
missingheads = set(list(range(len(right_eyes)))).difference(index_taken)
else:
missingheads = []
for headsita in missingheads:
newHead = head(left_eye = headsita)
heads[headsita] = newHead
else:
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(right_eyes)
distances, from_right_to_left = neigh.kneighbors(left_eyes)
newHead = head(left_eye = 0, right_eye = from_right_to_left[0][0])
heads = {0:newHead}
return heads
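# Minimal usage sketch for clusterHead (illustrative; the pixel coordinates
# below are invented). Each right eye is paired with its nearest left eye and
# the pair is wrapped in a `head` object keyed by the left-eye index.
def _cluster_head_example():
    left_eyes = [[100, 120], [300, 118]]
    right_eyes = [[140, 121], [345, 119]]
    heads = clusterHead(left_eyes, right_eyes)
    return {key: (h.l, h.r) for key, h in heads.items()}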
def show_sample(sample, ax=None, color_labels=False, is_tensor=False, **kwargs):
"""Shows a sample with landmarks"""
if not ax:
ax = plt.gca()
color_list = cm.Set1.colors[: len(sample["landmarks"])]
label_color = color_list if color_labels else "r"
if is_tensor:
ax.imshow(sample["image"].permute(1, 2, 0))
else:
ax.imshow(sample["image"])
ax.scatter(
sample["landmarks"][:, 0],
sample["landmarks"][:, 1],
s=20,
marker=".",
c=label_color,
)
ax.axis("off")
# ax.set_title(f'Sample #{sample["index"]}')
return ax
def show_sample_with_mask(sample, color_labels=False, is_tensor=False, **kwargs):
"""Shows a sample with landmarks and mask"""
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
color_list = cm.Set1.colors[: len(sample["landmarks"])]
label_color = color_list if color_labels else "r"
if is_tensor:
ax1.imshow(sample["image"].permute(1, 2, 0))
else:
ax1.imshow(sample["image"])
ax1.scatter(
sample["landmarks"][:, 0],
sample["landmarks"][:, 1],
s=20,
marker=".",
c=label_color,
)
ax1.axis("off")
ax2.imshow(sample["mask"], cmap="gray")
ax2.axis("off")
# ax.set_title(f'Sample #{sample["index"]}')
return fig, (ax1, ax2)
def show_multiple_samples(samples, **kwargs):
"""Shows multiple samples with landmarks"""
n = len(samples)
n_cols = 4 if n > 4 else n
n_rows = int(np.ceil(n / 4))
fig, axs = plt.subplots(n_rows, n_cols, figsize=(n_cols * 5, n_rows * 5))
for i, ax in enumerate(axs.flatten()):
if i < n:
ax = show_sample(samples[i], ax=ax, **kwargs)
else:
ax.axis("off")
return fig, axs
def show_random_sample(dataset, n_samples=4, seed=None, **kwargs):
"""Shows a random sample of images with landmarks."""
if seed:
rng = np.random.RandomState(seed)
else:
rng = np.random.RandomState()
index_list = rng.randint(0, len(dataset), size=n_samples)
fig, axs = show_multiple_samples([dataset[i] for i in index_list], **kwargs)
return fig, axs
def multi_neighborhood_mask(image, landmarks):
"""
Creates a mask in a 3 by 3 neighborhood of each landmark with a unique label
"""
w, h = image.size
mask = np.zeros((w, h))
for mask_index, (x, y) in enumerate(landmarks):
for i, j in product([-1, 0, 1], [-1, 0, 1]):
mask[int(x + i), int(y + j)] = mask_index + 1
return mask.T
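# Sketch of multi_neighborhood_mask on a blank PIL image (illustrative; the
# landmark positions are arbitrary). Each landmark paints a 3x3 patch with its
# own 1-based label, so the resulting mask contains exactly the values 0, 1, 2.
def _multi_neighborhood_mask_example():
    blank = Image.new("L", (64, 64))
    mask = multi_neighborhood_mask(blank, [(10, 10), (30, 40)])
    return np.unique(mask)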
def gen_all_masks(samples, root_dir, mask_dir, path_sub):
"""Generate all the masks
Args:
samples: The dataset object. Note it must be the cropped version
root_dir: Location of root data dir
mask_dir: A dir to story the masks. Note it must have same name as image dir
with path_sub[0] replace
path_sub: A list of tuples which is used for replacement of image path.
"""
if not os.path.exists(root_dir + mask_dir):
os.mkdir(root_dir + mask_dir)
for i in range(len(samples)):
h, w = samples.landmark_frame.iloc[i, 1:3]
        # build a blank PIL image of the requested size; multi_neighborhood_mask
        # reads the dimensions from image.size
        mask = multi_neighborhood_mask(Image.new("L", (int(w), int(h))), samples[i]["landmarks"])
mask_path = samples.img_paths[i].replace(*path_sub[0]).replace(*path_sub[1])
folder, file = os.path.split(mask_path)
if not os.path.exists(folder):
os.mkdir(folder)
np.save(mask_path, mask)
def samples_to_dataframe(samples, landmark_names):
"""Creates a dataframe with the landmarks data and image size.
Note: this function loops over and opens every image and
thus it takes a while to run. The dataframe only need to be created
once. In the future much faster operation can be performed
on the dataframe rather than looping over each sample. This will
improve development
Also this code depends on the ordering of height and width returned
by skimage defined in the dataset creation step. I only bring this
up because PIL and skimage are opposite.
(width, height) for PIL and (height, width) for skimage.
"""
df = pd.DataFrame(
index=range(len(samples)),
columns=["image_name", "height", "width", *landmark_names],
)
for i in range(len(samples)):
record = {}
record["image_name"] = os.path.split(samples.img_paths[i])[-1]
record["height"] = samples[i]["image"].shape[0]
record["width"] = samples[i]["image"].shape[1]
for key, value in zip(landmark_names, samples[i]["landmarks"].ravel()):
record[key] = value
df.iloc[i] = record
return df
def crop_landmarks(df):
"""
Input: landmark dataframe
Output: cropped landmark dataframe
"""
cropped_df = df.copy(deep=True)
for i, row in df.iterrows():
w, h = row["width"], row["height"]
landmarks = np.array(row[3:]).reshape(-1, 2)
cropped_landmarks = deepcopy(landmarks)
for k, (Lx, Ly) in enumerate(landmarks):
if ((h - 1) - Ly) <= 0: # Bottom
cropped_landmarks[k, 1] = (h - 1) - 1
if Ly <= 0: # Top
cropped_landmarks[k, 1] = 1
if ((w - 1) - Lx) <= 0: # Right
cropped_landmarks[k, 0] = (w - 1) - 1
if Lx <= 0: # Left
cropped_landmarks[k, 0] = 1
cropped_df.iloc[i, 3:] = cropped_landmarks.flatten()
return cropped_df
def discrete_cmap(N, base_cmap=None):
"""Create an N-bin discrete colormap from the specified input map"""
# Note that if base_cmap is a string or None, you can simply do
# return plt.cm.get_cmap(base_cmap, N)
# The following works for string, None, or a colormap instance:
base = plt.cm.get_cmap(base_cmap)
color_list = base(np.linspace(0, 1, N))
cmap_name = base.name + str(N)
return colors.LinearSegmentedColormap.from_list(cmap_name, color_list, N)
def calculate_landmarks_from_probs(tensor):
landmarks = np.zeros((len(tensor) - 1, 2))
for i, mask in enumerate(tensor):
M = cv2.moments(mask.numpy())
cx = int(M["m10"] / M["m00"])
cy = int(M["m01"] / M["m00"])
if i > 0:
landmarks[i - 1] = cx, cy
return landmarks
def calculate_landmarks_from_segmentation(seg):
    labels = np.unique(seg)
"""Example Gym environment for the RRC 2021 Phase 2."""
import enum
import typing
import gym
import numpy as np
import time
import robot_fingers
# import trifinger_simulation
# import trifinger_simulation.visual_objects
# from trifinger_simulation import trifingerpro_limits
# import trifinger_simulation.tasks.move_cube_on_trajectory as task
# from trifinger_simulation.tasks import move_cube
import rrc_example_package.trifinger_simulation.python.trifinger_simulation as trifinger_simulation
import rrc_example_package.trifinger_simulation.python.trifinger_simulation.visual_objects
from rrc_example_package.trifinger_simulation.python.trifinger_simulation import trifingerpro_limits
import rrc_example_package.trifinger_simulation.python.trifinger_simulation.tasks.move_cube_on_trajectory as task
# import rrc_example_package.trifinger_simulation.python.trifinger_simulation.tasks.move_cube as task  # disabled: would shadow the move_cube_on_trajectory import above
from rrc_example_package.trifinger_simulation.python.trifinger_simulation.tasks import move_cube
class ActionType(enum.Enum):
"""Different action types that can be used to control the robot."""
#: Use pure torque commands. The action is a list of torques (one per
#: joint) in this case.
TORQUE = enum.auto()
#: Use joint position commands. The action is a list of angular joint
#: positions (one per joint) in this case. Internally a PD controller is
#: executed for each action to determine the torques that are applied to
#: the robot.
POSITION = enum.auto()
#: Use both torque and position commands. In this case the action is a
#: dictionary with keys "torque" and "position" which contain the
#: corresponding lists of values (see above). The torques resulting from
#: the position controller are added to the torques in the action before
#: applying them to the robot.
TORQUE_AND_POSITION = enum.auto()
class BaseCubeTrajectoryEnv(gym.GoalEnv):
"""Gym environment for moving cubes with TriFingerPro."""
def __init__(
self,
goal_trajectory: typing.Optional[task.Trajectory] = None,
action_type: ActionType = ActionType.POSITION,
step_size: int = 1,
disable_arm3 = False
):
"""Initialize.
Args:
goal_trajectory: Goal trajectory for the cube. If ``None`` a new
random trajectory is sampled upon reset.
action_type: Specify which type of actions to use.
See :class:`ActionType` for details.
step_size: Number of actual control steps to be performed in one
call of step().
"""
# Basic initialization
# ====================
if goal_trajectory is not None:
task.validate_goal(goal_trajectory)
self.goal = goal_trajectory
self.action_type = action_type
self.disable_arm3 = disable_arm3
if step_size < 1:
raise ValueError("step_size cannot be less than 1.")
self.step_size = step_size
# will be initialized in reset()
self.platform = None
# Create the action and observation spaces
# ========================================
robot_torque_space = gym.spaces.Box(
low=trifingerpro_limits.robot_torque.low,
high=trifingerpro_limits.robot_torque.high,
)
robot_position_space = gym.spaces.Box(
low=trifingerpro_limits.robot_position.low,
high=trifingerpro_limits.robot_position.high,
)
robot_velocity_space = gym.spaces.Box(
low=trifingerpro_limits.robot_velocity.low,
high=trifingerpro_limits.robot_velocity.high,
)
robot_tip_force_space = gym.spaces.Box(
low=np.array([0, 0, 0], dtype=np.float32),
high=np.array([1, 1, 1], dtype=np.float32),
)
object_state_space = gym.spaces.Dict(
{
"position": gym.spaces.Box(
low=trifingerpro_limits.object_position.low,
high=trifingerpro_limits.object_position.high,
),
"orientation": gym.spaces.Box(
low=trifingerpro_limits.object_orientation.low,
high=trifingerpro_limits.object_orientation.high,
),
"confidence": gym.spaces.Box(
low=np.array(0),
high=np.array(1),
),
}
)
if self.action_type == ActionType.TORQUE:
self.action_space = robot_torque_space
self._initial_action = trifingerpro_limits.robot_torque.default
elif self.action_type == ActionType.POSITION:
self.action_space = robot_position_space
self._initial_action = trifingerpro_limits.robot_position.default
elif self.action_type == ActionType.TORQUE_AND_POSITION:
self.action_space = gym.spaces.Dict(
{
"torque": robot_torque_space,
"position": robot_position_space,
}
)
self._initial_action = {
"torque": trifingerpro_limits.robot_torque.default,
"position": trifingerpro_limits.robot_position.default,
}
else:
raise ValueError("Invalid action_type")
self.observation_space = gym.spaces.Dict(
{
"robot_observation": gym.spaces.Dict(
{
"position": robot_position_space,
"velocity": robot_velocity_space,
"torque": robot_torque_space,
"tip_force": robot_tip_force_space,
}
),
"object_observation": gym.spaces.Dict(
{
"position": object_state_space["position"],
"orientation": object_state_space["orientation"],
"confidence": object_state_space["confidence"],
}
),
"action": self.action_space,
"desired_goal": object_state_space["position"],
"achieved_goal": object_state_space["position"],
}
)
def compute_reward_rrc(
self,
achieved_goal: task.Position,
desired_goal: task.Position,
info: dict,
) -> float:
"""Compute the reward for the given achieved and desired goal.
Args:
achieved_goal: Current position of the object.
desired_goal: Goal position of the current trajectory step.
info: An info dictionary containing a field "time_index" which
contains the time index of the achieved_goal.
Returns:
The reward that corresponds to the provided achieved goal w.r.t. to
the desired goal. Note that the following should always hold true::
ob, reward, done, info = env.step()
assert reward == env.compute_reward(
ob['achieved_goal'],
ob['desired_goal'],
info,
)
"""
# This is just some sanity check to verify that the given desired_goal
# actually matches with the active goal in the trajectory.
active_goal = np.asarray(
task.get_active_goal(
self.info["trajectory"], self.info["time_index"]
)
)
assert np.all(active_goal == desired_goal), "{}: {} != {}".format(
info["time_index"], active_goal, desired_goal
)
return -task.evaluate_state(
info["trajectory"], info["time_index"], achieved_goal
)
def step(self, action):
"""Run one timestep of the environment's dynamics.
When end of episode is reached, you are responsible for calling
``reset()`` to reset this environment's state.
Args:
action: An action provided by the agent (depends on the selected
:class:`ActionType`).
Returns:
tuple:
- observation (dict): agent's observation of the current
environment.
- reward (float): amount of reward returned after previous action.
- done (bool): whether the episode has ended, in which case further
step() calls will return undefined results.
- info (dict): info dictionary containing the current time index.
"""
raise NotImplementedError()
def reset(self):
raise NotImplementedError()
def seed(self, seed=None):
"""Sets the seed for this env’s random number generator.
.. note::
Spaces need to be seeded separately. E.g. if you want to sample
actions directly from the action space using
``env.action_space.sample()`` you can set a seed there using
``env.action_space.seed()``.
Returns:
List of seeds used by this environment. This environment only uses
a single seed, so the list contains only one element.
"""
self.np_random, seed = gym.utils.seeding.np_random(seed)
task.seed(seed)
return [seed]
def _create_observation(self, t, action, obs_type='default'):
robot_observation = self.platform.get_robot_observation(t)
camera_observation = self.platform.get_camera_observation(t)
object_observation = camera_observation.filtered_object_pose
active_goal = np.asarray(
task.get_active_goal(self.info["trajectory"], t)
)
observation = {
"robot_observation": {
"position": robot_observation.position,
"velocity": robot_observation.velocity,
"torque": robot_observation.torque,
"tip_force": robot_observation.tip_force,
},
"object_observation": {
"position": object_observation.position,
"orientation": object_observation.orientation,
"confidence": object_observation.confidence,
},
"action": action,
"desired_goal": active_goal,
"achieved_goal": object_observation.position,
}
return observation
def _gym_action_to_robot_action(self, gym_action):
# construct robot action depending on action type
if self.action_type == ActionType.TORQUE:
robot_action = self.platform.Action(torque=gym_action)
elif self.action_type == ActionType.POSITION:
robot_action = self.platform.Action(position=gym_action)
elif self.action_type == ActionType.TORQUE_AND_POSITION:
robot_action = self.platform.Action(
torque=gym_action["torque"], position=gym_action["position"]
)
else:
raise ValueError("Invalid action_type")
return robot_action
class SimtoRealEnv(BaseCubeTrajectoryEnv):
"""Gym environment for moving cubes with simulated TriFingerPro."""
def __init__(
self,
action_type: ActionType = ActionType.TORQUE,
difficulty=None, sparse_rewards=True, step_size=101, distance_threshold=0.02, max_steps=50, visualization=False,
goal_trajectory=None, steps_per_goal=50, xy_only=False,
env_type='sim', obs_type='default', env_wrapped=False, increase_fps=False, disable_arm3=False, single_tra = False
):
"""Initialize.
Args:
goal_trajectory: Goal trajectory for the cube. If ``None`` a new
random trajectory is sampled upon reset.
action_type (ActionType): Specify which type of actions to use.
See :class:`ActionType` for details.
step_size (int): Number of actual control steps to be performed in
one call of step().
visualization (bool): If true, the pyBullet GUI is run for
visualization.
"""
super().__init__(
goal_trajectory=goal_trajectory,
action_type=action_type,
step_size=step_size,
disable_arm3=disable_arm3
)
self.visualization = visualization
self.goal_trajectory = goal_trajectory
self.sparse_rewards = sparse_rewards
self.prev_object_obs = None
self._max_episode_steps = max_steps # 50 * step_size simulator steps
self.distance_threshold = distance_threshold
self.step_size = step_size
self.steps_per_goal = steps_per_goal
self.xy_only = xy_only
self.env_type = env_type
self.obs_type = obs_type
self.env_wrapped = env_wrapped
self.increase_fps = increase_fps
self.disable_arm3 = disable_arm3
self.cube_scale = 1
if self.obs_type == 'default':
self.z_pos = 29
else:
self.z_pos = 32
if self.sparse_rewards:
self.compute_reward = self.compute_sparse_reward
else:
self.compute_reward = self.compute_reward_rrc
if goal_trajectory is None:
if difficulty != None:
task.GOAL_DIFFICULTY = difficulty
if steps_per_goal != None:
#: Number of time steps for which the first goal in the trajectory is active.
task.FIRST_GOAL_DURATION = steps_per_goal * step_size
#: Number of time steps for which following goals in the trajectory are active.
task.GOAL_DURATION = steps_per_goal * step_size
# if single_tra:
# task.FIRST_GOAL_DURATION = 3750
def step(self, action, initial=False):
"""Run one timestep of the environment's dynamics.
When end of episode is reached, you are responsible for calling
``reset()`` to reset this environment's state.
Args:
action: An action provided by the agent (depends on the selected
:class:`ActionType`).
Returns:
tuple:
- observation (dict): agent's observation of the current
environment.
- reward (float): amount of reward returned after previous action.
- done (bool): whether the episode has ended, in which case further
step() calls will return undefined results.
- info (dict): info dictionary containing the current time index.
"""
if self.platform is None:
raise RuntimeError("Call `reset()` before starting to step.")
if not self.action_space.contains(action):
raise ValueError(
"Given action is not contained in the action space."
)
num_steps = self.step_size
# ensure episode length is not exceeded due to step_size
step_count_after = self.info["time_index"] + num_steps
if step_count_after > move_cube.EPISODE_LENGTH:
excess = step_count_after - move_cube.EPISODE_LENGTH
num_steps = max(1, num_steps - excess)
action_to_apply = action.copy()
if self.disable_arm3:
assert self.action_type == ActionType.TORQUE, 'Disabling of arm 3 only implemented for torque control'
action_to_apply[6:9] = np.array([-self.action_space.high[0],self.action_space.high[0],-self.action_space.high[0]])
reward = 0.0
for _ in range(num_steps):
# send action to robot
robot_action = self._gym_action_to_robot_action(action_to_apply)
t = self.platform.append_desired_action(robot_action)
if self.env_type == 'sim' and self.visualization:
# update goal visualization
goal_position = task.get_active_goal(
self.info["trajectory"], t
)
self.goal_marker.set_state(goal_position, (0, 0, 0, 1))
self.info["time_index"] = t
#TODO: No need to create obs until loop ended
observation = self._create_observation(
self.info["time_index"], action, obs_type='full'
)
self.info["rrc_reward"] += self.compute_reward_rrc(
observation["achieved_goal"],
observation["desired_goal"],
self.info,
)
if initial:
self._active_goal = observation["desired_goal"]
break
is_done = self.info["time_index"] >= self._max_episode_steps * self.step_size or self.info["time_index"] >= task.EPISODE_LENGTH
# Flatten and update obs here if env is not wrapped, else do it in wrapper env
if not self.env_wrapped:
observation = self._update_obj_vel(observation, initial)
observation = self.flatten_obs(observation)
# Compute reward based on 'active goal'
reward = self.compute_reward(observation['achieved_goal'], self._active_goal, self.info)
self.info["is_success"] = self.compute_sparse_reward(observation['achieved_goal'], self._active_goal, None, check_success=True) == 0
self.info["xy_fail"] = self.compute_xy_fail(observation['achieved_goal'], self._active_goal)
# Update active goal (lags 1 behind)
self._active_goal = observation["desired_goal"]
return observation, reward, is_done, self.info
def reset(self, difficulty=None, init_state='normal', noisy=False, noise_level=1):
"""Reset the environment."""
move_cube._CUBE_WIDTH = move_cube._CUBE_WIDTH * self.cube_scale
if self.goal_trajectory == None and difficulty != None:
move_cube.GOAL_DIFFICULTY = difficulty
# hard-reset simulation
del self.platform
if self.env_type == 'sim':
rob_position, cube_pos, cube_orient = self.sample_init_state(init_state, noisy, noise_level=noise_level)
object_pose = task.move_cube.Pose(
position=cube_pos,
orientation=cube_orient
)
self.platform = trifinger_simulation.TriFingerPlatform(
visualization=self.visualization,
initial_robot_position=rob_position,
initial_object_pose=object_pose,
cube_scale=self.cube_scale
)
if self.increase_fps:
self.platform.camera_rate_fps = 26
elif self.env_type == 'real':
self.platform = robot_fingers.TriFingerPlatformWithObjectFrontend()
else:
assert False, "Env type must be either sim or real"
# if no goal is given, sample one randomly
if self.goal is None:
trajectory = task.sample_goal()
else:
trajectory = self.goal
# visualize the goal
if self.visualization and self.env_type == 'sim':
self.goal_marker = trifinger_simulation.visual_objects.CubeMarker(
width=task.move_cube._CUBE_WIDTH,
position=trajectory[0][1],
orientation=(0, 0, 0, 1),
pybullet_client_id=self.platform.simfinger._pybullet_client_id,
)
self.info = {"time_index": -1, "trajectory": trajectory, "rrc_reward": 0, "xy_fail": False}
# need to already do one step to get initial observation
observation, _, _, _ = self.step(self._initial_action, initial=True)
return observation
def _update_obj_vel(self, observation, initial):
if initial:
# Initial cube velocities are 0 (or should be)
observation['object_observation']['lin_vel'] = np.zeros_like(observation["object_observation"]["position"])
observation['object_observation']['ang_vel'] = np.zeros_like(observation["object_observation"]["orientation"])
self._last_obs_vel = {
"position": observation['object_observation']['lin_vel'].copy(),
"orientation": observation['object_observation']['ang_vel'].copy()
}
self._steps_since_obj_update = 0
else:
lin_vel = observation['object_observation']['position'] - self._prev_object_obs['position']
ang_vel = observation['object_observation']['orientation'] - self._prev_object_obs['orientation']
# If object pose is updated, update object velocities
if self.check_obs_updated(lin_vel, ang_vel):
# Just take velocity as diff in observations
observation['object_observation']['lin_vel'] = lin_vel
observation['object_observation']['ang_vel'] = ang_vel
# Update last obs-diff
self._last_obs_vel['position'] = lin_vel
self._last_obs_vel['orientation'] = ang_vel
self._steps_since_obj_update = 0
# Else maintain previous velocities and increment counter
else:
observation['object_observation']['lin_vel'] = self._last_obs_vel['position']
observation['object_observation']['ang_vel'] = self._last_obs_vel['orientation']
self._steps_since_obj_update += 1
# Add counter to obs
observation['object_observation']['steps_since_update'] = np.array([self._steps_since_obj_update])
# Set previous object obs to current
self._prev_object_obs = {
"position": observation["object_observation"]["position"],
"orientation": observation["object_observation"]["orientation"]
}
return observation
def check_obs_updated(self, lin_vel, ang_vel):
# If all differences are 0.0 then obs has not been updated
check = np.sum(lin_vel != 0.0) + np.sum(ang_vel != 0.0)
return check != 0
# Concat robot obs and object obs into single array.
    # WARNING: if changes are made, must change self.z_pos to match
def flatten_obs(self, observation):
state_obs = None
if self.obs_type =='default':
# Robot obs
state_obs = observation['robot_observation']['position']
state_obs = np.concatenate((state_obs, observation['robot_observation']['velocity']))
state_obs = np.concatenate((state_obs, observation['robot_observation']['torque']))
# Object obs
state_obs = np.concatenate((state_obs, observation['object_observation']['position']))
state_obs = np.concatenate((state_obs, observation['object_observation']['orientation']))
state_obs = np.concatenate((state_obs, observation['object_observation']['lin_vel']))
state_obs = np.concatenate((state_obs, observation['object_observation']['ang_vel']))
else:
# Robot obs
state_obs = observation['robot_observation']['position']
            state_obs = np.concatenate((state_obs, observation['robot_observation']['velocity']))
import sys
sys.path.append("python")
from SurfStatT import *
import surfstat_wrap as sw
import numpy as np
import pytest
sw.matlab_init_surfstat()
def dummy_test(slm, contrast):
try:
# wrap matlab functions
Wrapped_slm = sw.matlab_SurfStatT(slm, contrast)
except:
pytest.skip("Original MATLAB code does not work with these inputs.")
# run python functions
Python_slm = py_SurfStatT(slm, contrast)
testout_SurfStatT = []
# compare matlab-python outputs
for key in Wrapped_slm:
testout_SurfStatT.append(np.allclose(Python_slm[key], Wrapped_slm[key], \
rtol=1e-05, equal_nan=True))
assert all(flag == True for (flag) in testout_SurfStatT)
#### Test 1
def test_1d_row_vectors():
a = np.random.randint(1,10)
A = {}
A['X'] = np.random.rand(a,1)
A['df'] = np.array([[3.0]])
A['coef'] = np.random.rand(1,a).reshape(1,a)
A['SSE'] = np.random.rand(1, a)
B = np.random.rand(1).reshape(1,1)
dummy_test(A, B)
#### Test 2 ### square matrices
def test_2d_square_matrix():
a = np.random.randint(1,10)
b = np.random.randint(1,10)
A = {}
A['X'] = np.random.rand(a,a)
A['df'] = np.array([[b]])
A['coef'] = np.random.rand(a,a)
A['SSE'] = np.random.rand(1, a)
    B = np.random.rand(1, a)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, UpSampling2D,Conv2DTranspose, Concatenate
from tensorflow.keras import Model, regularizers
import csv
import numpy as np
import matplotlib.pyplot as plt
import glob
from time import time
from tensorflow.keras.regularizers import l2
# In[2]:
path = "ML_forVidya/ML_steady_state_raw_data_folder_training/ml_raw_data_*.csv"
num_train = len(glob.glob(path))
power_map_train = np.zeros((num_train,34,32))
temp_map_train = np.zeros((num_train,34,32))
for im_num,fname in enumerate(glob.glob(path)):
with open(fname) as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
x = int(np.round(float(row[1])/2.5e-4))
y = int(np.round(float(row[2])/2.5e-4))
dyn_pow = float(row[3])
leak_pow = float(row[4])
alpha = float(row[5])
power_map_train[im_num,x,y] = alpha*dyn_pow + leak_pow
temp_map_train[im_num,x,y] = float(row[7])
max_temp = np.max(temp_map_train)
max_power = np.max(power_map_train)
power_map_train = power_map_train/max_power
temp_map_train = temp_map_train/max_temp
power_map_train = power_map_train[...,np.newaxis]
temp_map_train = temp_map_train[...,np.newaxis]
# In[3]:
print(power_map_train.shape)
for im_num,power in enumerate(power_map_train):
plt.figure()
plt.imshow(np.squeeze(power))
plt.figure()
plt.imshow(np.squeeze(temp_map_train[im_num,...]))
# In[4]:
path = "ML_forVidya/ML_steady_state_raw_data_folder_testing/ml_raw_data_*.csv"
num_test = len(glob.glob(path))
power_map_test = np.zeros((num_test,34,32))
temp_map_test = np.zeros((num_test,34,32))
for im_num,fname in enumerate(glob.glob(path)):
with open(fname) as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
x = int(np.round(float(row[1])/2.5e-4))
y = int(np.round(float(row[2])/2.5e-4))
dyn_pow = float(row[3])
leak_pow = float(row[4])
alpha = float(row[5])
power_map_test[im_num,x,y] = alpha*dyn_pow + leak_pow
temp_map_test[im_num,x,y] = float(row[7])
power_map_test = power_map_test/max_power
temp_map_test = temp_map_test/max_temp
power_map_test = power_map_test[...,np.newaxis]
temp_map_test = temp_map_test[...,np.newaxis]
# In[5]:
print(power_map_test.shape)
for im_num,power in enumerate(power_map_test):
plt.figure()
plt.imshow(np.squeeze(power))
plt.figure()
plt.imshow(np.squeeze(temp_map_test[im_num,...]))
# In[6]:
train_ds = tf.data.Dataset.from_tensor_slices(
(power_map_train, temp_map_train)).batch(1)
test_ds = tf.data.Dataset.from_tensor_slices((power_map_test, temp_map_test)).batch(1)
# In[7]:
reg_rate =0.001
class encoder(Model):
def __init__(self):
super(encoder, self).__init__()
self.conv1 = Conv2D(64, 3, activation='relu',padding='SAME',kernel_regularizer=l2(reg_rate), bias_regularizer=l2(reg_rate))
self.max1 = MaxPooling2D(2, padding='same')
self.conv2 = Conv2D(32, 3, activation='relu',padding='SAME',kernel_regularizer=l2(reg_rate), bias_regularizer=l2(reg_rate))
self.max2 = MaxPooling2D(2, padding='same')
self.conv3 = Conv2D(16, 5, activation='relu',padding='SAME',kernel_regularizer=l2(reg_rate), bias_regularizer=l2(reg_rate))
self.max3 = MaxPooling2D(2, padding='same')
# self.dense = Dense(128,activation='relu',kernel_regularizer=l2(reg_rate), bias_regularizer=l2(reg_rate))
def call(self, x):
x0 = self.conv1(x)
x1 = self.max1(x0)
x1 = self.conv2(x1)
x2 = self.max2(x1)
x2 = self.conv3(x2)
x3 = self.max3(x2)
return (x0,x1,x2,x3)
class decoder(Model):
def __init__(self):
super(decoder, self).__init__()
self.conv0 = Conv2DTranspose(16, 7, activation='relu',padding='SAME',kernel_regularizer=l2(reg_rate), bias_regularizer=l2(reg_rate))
self.max1 = UpSampling2D(2)
self.conv1 = Conv2DTranspose(32, 7, activation='relu',padding='SAME',kernel_regularizer=l2(reg_rate), bias_regularizer=l2(reg_rate))
self.max2 = UpSampling2D(2)
self.conv2 = Conv2DTranspose(64, 3, activation='relu',padding='SAME',kernel_regularizer=l2(reg_rate), bias_regularizer=l2(reg_rate))
self.max3 = UpSampling2D(2)
self.conv3 = Conv2DTranspose(1, 3, activation='relu',padding='SAME',kernel_regularizer=l2(reg_rate), bias_regularizer=l2(reg_rate))
def call(self, vals):
x1 = self.conv0(vals[3])
x1 = self.max1(x1)
x1_shape = tf.shape(vals[2])
x1 = tf.slice(x1, tf.zeros(x1_shape.shape,dtype=tf.dtypes.int32), x1_shape)
x1 = Concatenate()([x1, vals[2]])
x2 = self.conv1(x1)
x2 = self.max2(x2)
x2_shape = tf.shape(vals[1])
# print(x2_shape)
x2 = tf.slice(x2, [0,0,0,0], x2_shape)
x2 = Concatenate()([x2, vals[1]])
x3 = self.conv2(x2)
x3 = self.max3(x3)
x3_shape = tf.shape(vals[0])
x3 = tf.slice(x3, [0,0,0,0], x3_shape)
# x3 = tf.slice(x3, tf.zeros(x3_shape.shape,dtype=tf.dtypes.int32), x3_shape)
x3 = Concatenate()([x3, vals[0]])
x4 = self.conv3(x3)
return x4
class autoencoder(Model):
def __init__(self):
super(autoencoder, self).__init__()
self.ae = encoder()
self.de = decoder()
def call(self, x):
vals = self.ae(x)
x = self.de(vals)
return x
# Create an instance of the model
model = autoencoder()
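# Quick shape check (illustrative): running one dummy normalised 34x32 power
# map through the skip-connected autoencoder should return a tensor with the
# same spatial shape, i.e. (1, 34, 32, 1).
print("autoencoder output shape:", model(tf.zeros((1, 34, 32, 1))).shape)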
# In[8]:
initial_learning_rate = 0.001
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=1000,
decay_rate=0.98,
staircase=True)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),
loss='mse',
metrics=['mse', 'mae', 'mape'])
# In[ ]:
st = time()
history = model.fit(train_ds, epochs=1500,
#steps_per_epoch=195,
validation_data=test_ds,
validation_freq=1
#validation_steps=3
)
et = time()
tt = et-st
print("Elapsed time: %03d:%02d:%05.2f"%(int(tt/3600),int(tt/60)%60,tt%60))
# In[ ]:
from matplotlib import pyplot
# pyplot.plot(history.history['mse'])
# pyplot.plot(history.history['mae'])
pyplot.plot(history.history['mape'])
# pyplot.plot(history.history['cosine_proximity'])
pyplot.show()
# In[ ]:
y_pred = model.predict(test_ds)
print(y_pred.shape)
for im_num,temp in enumerate(y_pred):
plt.figure()
fig, axes = plt.subplots(2, 2)
denorm_temp = np.squeeze(temp*max_temp)
denorm_pred_temp = (np.squeeze(temp_map_test[im_num,...])*max_temp)
max_temp_im = max(np.max(denorm_temp),np.max(denorm_pred_temp))
min_temp_im = min(np.min(denorm_temp),np.min(denorm_pred_temp))
err = abs(denorm_pred_temp - denorm_temp)
im = axes[0,1].imshow(denorm_pred_temp,vmin=0, vmax=max_temp_im)
im = axes[1,0].imshow(err,vmin=0, vmax=max_temp_im)
im = axes[0,0].imshow(denorm_temp,vmin=0, vmax=max_temp_im)
axes[1,1].axis('off')
fig.colorbar(im, ax=axes.ravel().tolist())
plt.show()
print(np.max(err))
# In[ ]:
y_pred = model.predict(train_ds)
print(y_pred.shape)
for im_num,temp in enumerate(y_pred):
plt.figure()
fig, axes = plt.subplots(2, 2)
denorm_temp = np.squeeze(temp*max_temp)
    denorm_pred_temp = (np.squeeze(temp_map_train[im_num,...])*max_temp)
import cv2
import os
import numpy as np
from pathlib import Path
from pycocotools.coco import COCO
from utils import MonoCamera, initialise_data_dictionary
import json
def correct_distortion(src_img, map_x, map_y, dst_bit=8):
src_img = np.clip(src_img, 0, 2 ** dst_bit - 1)
dst_img = cv2.remap(src_img, map_x, map_y, cv2.INTER_LINEAR)
return dst_img
def calc_distortion_mapping(camera_parameter_file_path, width, height):
mono_cam = MonoCamera(camera_parameter_file_path)
cam_mat, _ = cv2.getOptimalNewCameraMatrix(
mono_cam.K, mono_cam.d, (width, height), 0
)
map_x, map_y = cv2.initUndistortRectifyMap(
mono_cam.K, mono_cam.d, None, cam_mat, (width, height), 5
)
return map_x, map_y
def get_camera_matrix(camera_parameter_file_path, width, height):
mono_cam = MonoCamera(camera_parameter_file_path)
cam_mat, _ = cv2.getOptimalNewCameraMatrix(
mono_cam.K, mono_cam.d, (width, height), 0
)
return cam_mat
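# Hedged usage sketch: build the undistortion maps once for a given camera,
# then remap frames with correct_distortion. The calibration path and the
# dummy frame below are placeholders, not files shipped with this code.
def _undistort_example(camera_parameter_file_path="calibration/mono_camera.yaml"):
    frame = np.zeros((1024, 1280, 3), dtype=np.uint8)  # dummy 1280x1024 frame
    height, width = frame.shape[:2]
    map_x, map_y = calc_distortion_mapping(camera_parameter_file_path, width, height)
    return correct_distortion(frame, map_x, map_y)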
def undistort_point(point, distortion_mapping_x, distortion_mapping_y):
point = point.reshape(2)
x_distance = np.abs(distortion_mapping_x - point[0])
y_distance = np.abs(distortion_mapping_y - point[1])
total_distance = x_distance + y_distance
return np.flip(np.unravel_index(total_distance.argmin(), total_distance.shape))
def undistort_points(points, distortion_mapping_x, distortion_mapping_y):
new_points = []
for point in points:
print("old point: " + str(point))
new_point = undistort_point(point, distortion_mapping_x, distortion_mapping_y)
print("new point: " + str(new_point))
if new_point is not None:
new_points.append(new_point)
else:
print("point outside of edge of image, using original point")
print(point)
exit()
new_points.append(point.reshape(2).tolist())
print(new_points)
return new_points
def coco_labels_distort_correction(
coco_labels_filepath,
output_coco_labels_filepath,
camera_parameter_file_path,
image_width,
image_height,
):
data = initialise_data_dictionary(coco_labels_filepath + "annotations.json")
# load coco info
coco = COCO(coco_labels_filepath + "annotations.json")
img_ids = coco.getImgIds()
cat_ids = coco.getCatIds()
for img_id in img_ids:
print(img_id)
# get annotations for image
anns_ids = coco.getAnnIds(imgIds=[img_id], catIds=cat_ids, iscrowd=None)
anns = coco.loadAnns(anns_ids)
coco_image = coco.loadImgs([img_id])[0]
image_width = coco_image["width"]
image_height = coco_image["height"]
image_name = coco_image["file_name"].split("/")[1].split(".")[0]
distortion_mapping_x, distortion_mapping_y = calc_distortion_mapping(
camera_parameter_file_path, image_width, image_height
)
print(distortion_mapping_x.shape)
print(distortion_mapping_y.shape)
# loop through, correcting scale on each annotation and appending to new list of annotations
for ann in anns:
n_points = int(len(ann["segmentation"][0]) / 2)
            # reshape target assumed: the four COCO bbox values as (x, y)-style pairs
            bbox = np.reshape(np.array(ann["bbox"], dtype=float), (-1, 2))
def icp(a, b,
max_time=1
):
import cv2
import numpy
# import copy
# import pylab
import time
import sys
import sklearn.neighbors
import scipy.optimize
def res(p, src, dst):
T = numpy.matrix([[numpy.cos(p[2]), -numpy.sin(p[2]), p[0]],
[numpy.sin(p[2]), numpy.cos(p[2]), p[1]],
[0, 0, 1]])
n = numpy.size(src, 0)
xt = numpy.ones([n, 3])
xt[:, :-1] = src
xt = (xt * T.T).A
d = numpy.zeros(numpy.shape(src))
d[:, 0] = xt[:, 0] - dst[:, 0]
d[:, 1] = xt[:, 1] - dst[:, 1]
r = numpy.sum(numpy.square(d[:, 0]) + numpy.square(d[:, 1]))
return r
def jac(p, src, dst):
T = numpy.matrix([[numpy.cos(p[2]), -numpy.sin(p[2]), p[0]],
[numpy.sin(p[2]), numpy.cos(p[2]), p[1]],
[0, 0, 1]])
n = numpy.size(src, 0)
xt = numpy.ones([n, 3])
xt[:, :-1] = src
xt = (xt * T.T).A
d = numpy.zeros(numpy.shape(src))
d[:, 0] = xt[:, 0] - dst[:, 0]
d[:, 1] = xt[:, 1] - dst[:, 1]
dUdth_R = numpy.matrix([[-numpy.sin(p[2]), -numpy.cos(p[2])],
[numpy.cos(p[2]), -numpy.sin(p[2])]])
dUdth = (src * dUdth_R.T).A
g = numpy.array([numpy.sum(2 * d[:, 0]),
numpy.sum(2 * d[:, 1]),
numpy.sum(2 * (d[:, 0] * dUdth[:, 0] + d[:, 1] * dUdth[:, 1]))])
return g
def hess(p, src, dst):
n = numpy.size(src, 0)
T = numpy.matrix([[numpy.cos(p[2]), -numpy.sin(p[2]), p[0]],
[numpy.sin(p[2]), numpy.cos(p[2]), p[1]],
[0, 0, 1]])
n = numpy.size(src, 0)
xt = numpy.ones([n, 3])
xt[:, :-1] = src
xt = (xt * T.T).A
d = numpy.zeros(numpy.shape(src))
d[:, 0] = xt[:, 0] - dst[:, 0]
d[:, 1] = xt[:, 1] - dst[:, 1]
dUdth_R = numpy.matrix([[-numpy.sin(p[2]), -numpy.cos(p[2])], [numpy.cos(p[2]), -numpy.sin(p[2])]])
dUdth = (src * dUdth_R.T).A
H = numpy.zeros([3, 3])
H[0, 0] = n * 2
H[0, 2] = numpy.sum(2 * dUdth[:, 0])
H[1, 1] = n * 2
H[1, 2] = numpy.sum(2 * dUdth[:, 1])
H[2, 0] = H[0, 2]
H[2, 1] = H[1, 2]
d2Ud2th_R = numpy.matrix([[-numpy.cos(p[2]), numpy.sin(p[2])], [-numpy.sin(p[2]), -numpy.cos(p[2])]])
d2Ud2th = (src * d2Ud2th_R.T).A
H[2, 2] = numpy.sum(2 * (
numpy.square(dUdth[:, 0]) + numpy.square(dUdth[:, 1]) + d[:, 0] * d2Ud2th[:, 0] + d[:, 0] * d2Ud2th[
:, 0]))
return H
t0 = time.time()
init_pose = (0, 0, 0)
src = numpy.array([a.T], copy=True).astype(numpy.float32)
dst = numpy.array([b.T], copy=True).astype(numpy.float32)
Tr = numpy.array([[numpy.cos(init_pose[2]), -numpy.sin(init_pose[2]), init_pose[0]],
                      [numpy.sin(init_pose[2]), numpy.cos(init_pose[2]), init_pose[1]],
                      [0, 0, 1]])
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_arrays
from ..utils import atleast2d_or_csc
from ..utils import array2d
from ..utils import atleast2d_or_csr
from ..utils import safe_asarray
from ..utils import warn_if_not_float
from ..utils.sparsefuncs import inplace_csr_row_normalize_l1
from ..utils.sparsefuncs import inplace_csr_row_normalize_l2
from ..utils.sparsefuncs import inplace_csr_column_scale
from ..utils.sparsefuncs import mean_variance_axis0
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'Normalizer',
'OneHotEncoder',
'Scaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
if isinstance(std_, np.ndarray):
std_[std_ == 0.0] = 1.0
elif std_ == 0.:
std_ = 1.
else:
std_ = None
return mean_, std_
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
warn_if_not_float(X, estimator='The scale function')
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis0(X)
var[var == 0.0] = 1.0
inplace_csr_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
warn_if_not_float(X, estimator='The scale function')
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
if with_std:
Xr /= std_
return X
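# Illustrative sketch (added for clarity, not part of the original module): after
# scale(), each column of a small dense float array has zero mean and unit standard
# deviation. The helper name and demo data are made up for this example.
def _example_scale_usage():
    X_demo = np.array([[1., -1., 2.],
                       [2., 0., 0.],
                       [0., 1., -1.]])
    X_scaled = scale(X_demo)
    assert np.allclose(X_scaled.mean(axis=0), 0.0)
    assert np.allclose(X_scaled.std(axis=0), 1.0)
    return X_scaled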
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Standardizes features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The standardization is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This standardization is often used as an alternative to zero mean,
unit variance scaling.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default is True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
`min_` : ndarray, shape (n_features,)
Per feature adjustment for minimum.
`scale_` : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
warn_if_not_float(X, estimator=self)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
# Do not scale constant features
data_range[data_range == 0.0] = 1.0
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
X -= self.min_
X /= self.scale_
return X
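# Illustrative sketch (added for clarity, not part of the original module):
# fit_transform reproduces the X_std formula given in the MinMaxScaler docstring for
# the default feature_range of (0, 1). Helper name and demo data are made up.
def _example_minmax_scaler_usage():
    X_demo = np.array([[1., 2.],
                       [3., 6.],
                       [5., 10.]])
    X_scaled = MinMaxScaler(feature_range=(0, 1)).fit_transform(X_demo)
    X_std = (X_demo - X_demo.min(axis=0)) / (X_demo.max(axis=0) - X_demo.min(axis=0))
    assert np.allclose(X_scaled, X_std)
    return X_scaled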
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
`mean_` : array of floats with shape [n_features]
The mean value for each feature in the training set.
`std_` : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_arrays(X, copy=self.copy, sparse_format="csr")[0]
if warn_if_not_float(X, estimator=self):
X = X.astype(np.float)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis0(X)[1]
self.std_ = np.sqrt(var)
self.std_[var == 0.0] = 1.0
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
copy = copy if copy is not None else self.copy
X = check_arrays(X, copy=copy, sparse_format="csr")[0]
if warn_if_not_float(X, estimator=self):
X = X.astype(np.float)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_csr_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_csr_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
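# Illustrative sketch (added for clarity, not part of the original module):
# StandardScaler stores mean_ and std_ at fit time, and inverse_transform undoes the
# standardization. Helper name and demo data are made up.
def _example_standard_scaler_roundtrip():
    X_demo = np.array([[1., 10.],
                       [2., 20.],
                       [3., 30.]])
    scaler = StandardScaler().fit(X_demo)
    X_t = scaler.transform(X_demo)
    X_back = scaler.inverse_transform(X_t)
    assert np.allclose(X_back, X_demo)
    return scaler.mean_, scaler.std_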
class Scaler(StandardScaler):
def __init__(self, copy=True, with_mean=True, with_std=True):
warnings.warn("Scaler was renamed to StandardScaler. The old name "
" will be removed in 0.15.", DeprecationWarning)
super(Scaler, self).__init__(copy, with_mean, with_std)
def normalize(X, norm='l2', axis=1, copy=True):
"""Normalize a dataset along any axis
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_arrays(X, sparse_format=sparse_format, copy=copy)[0]
warn_if_not_float(X, 'The normalize function')
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)[:, np.newaxis]
norms[norms == 0.0] = 1.0
elif norm == 'l2':
norms = np.sqrt(np.sum(X ** 2, axis=1))[:, np.newaxis]
norms[norms == 0.0] = 1.0
X /= norms
if axis == 0:
X = X.T
return X
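# Illustrative sketch (added for clarity, not part of the original module): with
# norm='l2' and axis=1 every row of a dense array ends up with unit Euclidean norm.
# Helper name and demo data are made up.
def _example_normalize_usage():
    X_demo = np.array([[3., 4.],
                       [0., 5.]])
    X_l2 = normalize(X_demo, norm='l2', axis=1)
    assert np.allclose(np.sqrt((X_l2 ** 2).sum(axis=1)), 1.0)
    return X_l2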
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Parameters
----------
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
atleast2d_or_csr(X)
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
atleast2d_or_csr(X)
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default is True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
sparse_format = "csr" # We force sparse format to be either csr or csc.
if hasattr(X, "format"):
if X.format in ["csr", "csc"]:
sparse_format = X.format
X = check_arrays(X, sparse_format=sparse_format, copy=copy)[0]
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
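# Illustrative sketch (added for clarity, not part of the original module): values
# strictly above the threshold become 1, everything else becomes 0. Helper name and
# demo data are made up.
def _example_binarize_usage():
    X_demo = np.array([[0.2, -1.0, 3.5],
                       [0.0, 0.7, -0.1]])
    X_bin = binarize(X_demo, threshold=0.5)
    assert np.array_equal(X_bin, np.array([[0., 0., 1.],
                                           [0., 1., 0.]]))
    return X_bin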
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default is True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
atleast2d_or_csr(X)
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = array2d(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
K = array2d(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
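# Illustrative sketch (added for clarity, not part of the original module): for a
# linear kernel K = X X^T, KernelCenterer produces the same matrix as computing the
# kernel on mean-centered features, matching the claim in the class docstring.
# Helper name and demo data are made up.
def _example_kernel_centerer_usage():
    X_demo = np.array([[1., 2.],
                       [3., 4.],
                       [5., 0.]])
    K = np.dot(X_demo, X_demo.T)
    K_centered = KernelCenterer().fit(K).transform(K)
    X_centered = X_demo - X_demo.mean(axis=0)
    assert np.allclose(K_centered, np.dot(X_centered, X_centered.T))
    return K_centered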
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = safe_asarray(X)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = atleast2d_or_csc(X, copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
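# Illustrative sketch (added for clarity, not part of the original module): the
# transform is applied only to the selected column, and the untouched columns are
# stacked to the right of the transformed block. Helper name and demo data are made up.
def _example_transform_selected_usage():
    X_demo = np.array([[1., 10.],
                       [2., 20.]])
    X_new = _transform_selected(X_demo, lambda Z: Z * 100.0, selected=[0])
    assert np.array_equal(X_new, np.array([[100., 10.],
                                           [200., 20.]]))
    return X_new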
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
    a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
Attributes
----------
`active_features_` : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
`feature_indices_` : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
`n_values_` : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
n_values='auto')
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_arrays(X, sparse_format='dense', dtype=np.int)[0]
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
        n_values = np.hstack([[0], n_values])
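# Illustrative sketch (added for clarity; _fit_transform above is truncated in this
# snippet): the feature_indices_ shown in the class docstring follow from n_values_
# by prepending 0 and taking a cumulative sum. The cumsum step is an assumption
# consistent with the docstring example, not code taken from the snippet.
def _example_feature_indices_arithmetic():
    n_values_demo = np.array([2, 3, 4])  # as in the docstring example
    indices = np.cumsum(np.hstack([[0], n_values_demo]))
    assert np.array_equal(indices, np.array([0, 2, 5, 9]))
    return indices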
import numpy as np
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score, classification_report
from config import cfg
label_list = cfg['LABEL_LIST']
# The reason why set the class is to record the history of the metrics
class EvalClass(object):
def __init__(self):
self.acc = []
self.f1 = []
def compute_metrics(self, p):
predictions, labels = p
        predictions = np.argmax(predictions, axis=-1)
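# Illustrative sketch (added for clarity; compute_metrics above is truncated in this
# snippet): once the logits are reduced with argmax, the sklearn helpers imported
# above can be applied directly. The helper name and return format are assumptions,
# not the original code.
def _example_metrics_from_argmax(predictions, labels):
    acc = accuracy_score(labels, predictions)
    f1 = f1_score(labels, predictions, average='macro')
    return {'accuracy': acc, 'f1': f1}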
import os
import logging
import numpy as np
import pandas as pd
import math
import matplotlib.colors
import matplotlib.pyplot
import bokeh.models
import bokeh.plotting
import bokeh.core.properties
import bokeh.layouts
import scipy.stats
import remixt.utils
chromosomes = [str(a) for a in range(1, 23)] + ['X']
def _create_chromosome_colors(chromosomes):
color_map = matplotlib.pyplot.get_cmap('Set1')
chromosome_colors = list()
for i in range(len(chromosomes)):
rgb_color = color_map(float(i) / float(len(chromosomes)))
hex_color = matplotlib.colors.rgb2hex(rgb_color)
chromosome_colors.append(hex_color)
chromosome_colors = pd.DataFrame({'chromosome': chromosomes, 'scatter_color': chromosome_colors})
return chromosome_colors
chromosome_colors = _create_chromosome_colors(chromosomes)
def _create_chromosome_indices(chromosomes):
chromosome_indices = dict([(chromosome, idx) for idx, chromosome in enumerate(chromosomes)])
return chromosome_indices
chromosome_indices = _create_chromosome_indices(chromosomes)
def major_minor_scatter_plot(source):
""" Plot a major / minor scatter plot from a copy number data source
"""
p = bokeh.plotting.Figure(
title='raw major vs minor',
plot_width=1000, plot_height=500,
tools='pan,wheel_zoom,box_select,reset,lasso_select',
# logo=None,
x_range=[-0.5, 6.5],
y_range=[-0.5, 4.5],
)
p.title.text_font_size=bokeh.core.properties.value('10pt')
p.circle(
x='major_raw', y='minor_raw',
size='scatter_size', color='scatter_color', alpha=0.5,
source=source,
)
return p
def major_minor_segment_plot(source, major_column, minor_column, x_range, name, width=1000):
""" Plot a major / minor line plot from a copy number data source
"""
hover = bokeh.models.HoverTool(
tooltips=[
('segment_idx', '@segment_idx'),
('chromosome', '@chromosome'),
('start', '@start'),
('end', '@end'),
('major_raw', '@major_raw'),
('minor_raw', '@minor_raw'),
]
)
tools = [
bokeh.models.PanTool(dimensions='width'),
bokeh.models.WheelZoomTool(dimensions='width'),
bokeh.models.BoxZoomTool(),
bokeh.models.BoxSelectTool(),
bokeh.models.ResetTool(),
bokeh.models.TapTool(),
hover,
]
p = bokeh.plotting.Figure(
title=name + ' chromosome major/minor',
plot_width=width, plot_height=200,
tools=tools,
# logo=None,
toolbar_location='above',
x_range=x_range,
y_range=[-0.5, 6.5],
)
p.title.text_font_size = bokeh.core.properties.value('10pt')
p.quad(
top=major_column, bottom=0, left='plot_start', right='plot_end',
source=source, color='red', alpha=0.05, line_width=0)
p.quad(
top=minor_column, bottom=0, left='plot_start', right='plot_end',
source=source, color='blue', alpha=0.05, line_width=0)
p.segment(
y0=major_column, y1=major_column, x0='plot_start', x1='plot_end',
source=source, color='red', alpha=1.0, line_width=4)
p.segment(
y0=minor_column, y1=minor_column, x0='plot_start', x1='plot_end',
source=source, color='blue', alpha=1.0, line_width=2)
return p
def breakpoints_plot(source, x_range, width=1000):
""" Plot break ends from a breakpoint source
"""
hover = bokeh.models.HoverTool(
tooltips=[
('prediction_id', '@prediction_id'),
('chromosome', '@chromosome'),
('position', '@position'),
('strand', '@strand'),
('other_chromosome', '@other_chromosome'),
('other_position', '@other_position'),
('other_strand', '@other_strand'),
('type', '@type'),
]
)
tools = [
bokeh.models.PanTool(dimensions='width'),
bokeh.models.WheelZoomTool(dimensions='width'),
bokeh.models.BoxSelectTool(),
bokeh.models.ResetTool(),
bokeh.models.TapTool(),
hover,
]
p = bokeh.plotting.Figure(
title='break ends',
plot_width=width, plot_height=150,
tools=tools,
# logo=None,
x_range=x_range,
y_range=['+', '-'],
)
p.title.text_font_size = bokeh.core.properties.value('10pt')
p.triangle(
x='plot_position', y='strand', size=10, angle='strand_angle',
line_color='grey', fill_color='clonality_color', alpha=1.0,
source=source)
return p
def setup_genome_plot_axes(p, chromosome_plot_info):
""" Configure axes of a genome view
"""
chromosomes = list(chromosome_plot_info['chromosome'].values)
chromosome_bounds = [0] + list(chromosome_plot_info['chromosome_plot_end'].values)
chromosome_mids = list(chromosome_plot_info['chromosome_plot_mid'].values)
p.xgrid.ticker = bokeh.models.FixedTicker(ticks=[-1] + chromosome_bounds + [chromosome_bounds[-1] + 1])
p.xgrid.band_fill_alpha = 0.1
p.xgrid.band_fill_color = "navy"
p.xaxis[0].ticker = bokeh.models.FixedTicker(ticks=chromosome_bounds)
p.xaxis[0].major_label_text_font_size = '0pt'
p.text(x=chromosome_mids, y=-0.5, text=chromosomes, text_font_size=bokeh.core.properties.value('0.5em'), text_align='center')
def create_chromosome_plot_info(cnv, chromosome=''):
""" Create information about chromosome start ends for genome view
"""
cnv['chromosome_index'] = cnv['chromosome'].apply(lambda a: chromosome_indices[a])
cnv.sort_values(['chromosome_index', 'start'], inplace=True)
info = (
cnv.groupby('chromosome', sort=False)['end']
.max().reset_index().rename(columns={'end': 'chromosome_length'}))
info['chromosome_plot_end'] = np.cumsum(info['chromosome_length'])
info['chromosome_plot_start'] = info['chromosome_plot_end'].shift(1)
info.loc[info.index[0], 'chromosome_plot_start'] = 0
info['chromosome_plot_mid'] = 0.5 * (info['chromosome_plot_start'] + info['chromosome_plot_end'])
return info
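# Illustrative sketch (added for clarity, not part of the original module): with two
# chromosomes of length 100 and 50, the genome-wide plot coordinates become
# [0, 100) and [100, 150). Helper name and demo data are made up.
def _example_chromosome_plot_info():
    cnv_demo = pd.DataFrame({
        'chromosome': ['1', '1', '2'],
        'start': [0, 60, 0],
        'end': [60, 100, 50],
    })
    info = create_chromosome_plot_info(cnv_demo)
    assert list(info['chromosome_plot_start']) == [0.0, 100.0]
    assert list(info['chromosome_plot_end']) == [100, 150]
    return info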
def prepare_cnv_data(cnv, chromosome_plot_info, smooth_segments=False):
""" Prepare copy number data for loading in the a data source
"""
# Group segments with same state
if smooth_segments:
cnv['chromosome_index'] = cnv['chromosome'].apply(lambda a: chromosome_indices[a])
cnv['diff'] = cnv[['chromosome_index', 'major_1', 'major_2', 'minor_1', 'minor_2']].diff().abs().sum(axis=1)
cnv['is_diff'] = (cnv['diff'] != 0)
cnv['cn_group'] = cnv['is_diff'].cumsum()
def agg_segments(df):
stable_cols = [
'chromosome',
'major_1',
'major_2',
'minor_1',
'minor_2',
'major_raw_e',
'minor_raw_e',
]
a = df[stable_cols].iloc[0]
a['start'] = df['start'].min()
a['end'] = df['end'].max()
a['length'] = df['length'].sum()
length_normalized_cols = [
'major_raw',
'minor_raw',
]
for col in length_normalized_cols:
a[col] = (df[col] * df['length']).sum() / (df['length'].sum() + 1e-16)
return a
cnv = cnv.groupby('cn_group').apply(agg_segments)
# Scatter size scaled by segment length
    cnv['scatter_size'] = 2. * np.sqrt(cnv['length'] / 1e6)
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = pi + offs
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
unanchored = np.array([pd.offsets.Hour(n=1),
pd.offsets.Minute(n=-2)])
with pytest.raises(period.IncompatibleFrequency):
pi + unanchored
with pytest.raises(TypeError):
unanchored + pi
@pytest.mark.xfail(reason='GH#18824 radd doesnt implement this case')
def test_pi_radd_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = offs + pi
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
                      np.timedelta64(30, 's')
import time
import numpy as np
import pandas as pd
from sklearn import svm
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.linear_model import LogisticRegression
#from sklearn.experimental import enable_iterative_imputer
#from sklearn.impute import IterativeImputer
import STowl_multi as st
import multiprocessing as mp
import os
import math
############################################################################################################
m = 5
n = 1000
nindpt = 5000
d = 10
num_imp = 5
cost_vec = np.array([1/128,1/64,1/32,1/16,1/8,1/4,1/2,1.0])
tuned_paras = [{'C': cost_vec}]
itermax = 50
itertol = 1e-4
studySetting = 'longitudinal'
tuneSetting = 'exponential'
obsSetting = 'observational'
############################################################################################################
def simulation(seed_base):
np.random.seed(1234+seed_base)
########################################################################################################
## read in generated simulation datasets by R ##########################################################
data_indpt = pd.read_csv("".join(["data_indpt",str(seed_base+1),".txt"]))
data = pd.read_csv("".join(["data",str(seed_base+1),".txt"]))
data_indpt = data_indpt.to_numpy()
data = data.to_numpy()
Xindpt = data_indpt[:,:d]
Tindpt = data_indpt[:,d]
Xall = data[:,:d]
Aall = data[:,d]
Tall = data[:,d+1]
B_mat = data[:,(d+2):(d+2+m)]
miss_mat = data[:,(d+2+m):(d+2+m*2)]
dataLabel = data[:,d+2+m*2]
valid_index = [item for sublist in np.where(np.isnan(dataLabel) == False) for item in sublist]
Xall = Xall[valid_index,:]
Aall = Aall[valid_index]
Tall = Tall[valid_index]
B_mat = B_mat[valid_index,:]
miss_mat = miss_mat[valid_index,:]
dataLabel = dataLabel[valid_index]
n_valid = len(dataLabel)
dataLabel = np.asarray([int(dataLabel[i]) for i in range(n_valid)])
########################################################################################################
## check the unique values of dataLabel, the labels are ordered
uniqueIndex = list(set(dataLabel))
if uniqueIndex[-1] != m-1:
dresult = dict()
dresult['cost_tune'] = np.nan
dresult['par_tune'] = np.nan
dresult['acc_all_tune'] = np.nan
dresult['acc_all_1CV'] = np.nan
dresult['acc_all_allCV'] = np.nan
dresult['acc_all_impCV'] = np.nan
dresult['acc_indpt_tune'] = np.nan
dresult['acc_indpt_1CV'] = np.nan
dresult['acc_indpt_allCV'] = np.nan
dresult['acc_indpt_impCV'] = np.nan
dresult['evf_B_tune'] = np.nan
dresult['evf_B_1CV'] = np.nan
dresult['evf_B_allCV'] = np.nan
dresult['evf_B_impCV'] = np.nan
dresult['ts_tune'] = np.nan
dresult['ts_1CV'] = np.nan
dresult['ts_allCV'] = np.nan
dresult['ts_impCV'] = np.nan
dresult['time_1CV'] = np.nan
dresult['time_allCV'] = np.nan
dresult['time_impCV'] = np.nan
dresult['time_tune'] = np.nan
dresult['n_valid'] = n_valid
else:
Ball = np.full(n_valid, np.nan)
for i in range(n_valid):
Ball[i] = B_mat[i, dataLabel[i]]
#########################################################################################################
## determine propensity score ###########################################################################
propenScore = st.propensityScore(Xall, Aall, uniqueIndex, dataLabel)
pall = propenScore.p(obsSetting=obsSetting)
#########################################################################################################
## OWL on S_m only ######################################################################################
start_time_1CV = time.time()
index = [item for sublist in np.where(dataLabel == m-1) for item in sublist]
Xm = Xall[index,:]
Bm = Ball[index]
pm = pall[index]
Am = Aall[index]
model1 = GridSearchCV(svm.SVC(kernel='linear'), tuned_paras, cv=5, scoring='accuracy', fit_params={'sample_weight': Bm/pm})
model1.fit(Xm, Am)
time_1CV = time.time()-start_time_1CV
predAll_model1 = model1.best_estimator_.predict(Xall)
acc_all_1CV = st.evalPred(predAll_model1, Tall).acc()
predIndpt_model1 = model1.best_estimator_.predict(Xindpt)
acc_indpt_1CV = st.evalPred(predIndpt_model1, Tindpt).acc()
ts1 = st.tuneStat(Xall, Aall, Ball, m, uniqueIndex, dataLabel, model1)
ts_1CV = ts1.tsSSE(model='linear')
evf1 = st.EVF()
evf1.evfCal(Xall, Aall, Ball, uniqueIndex, dataLabel, model1)
evf_B_1CV = evf1.evfSeq
############################################################################################################
## OWL on S ################################################################################################
start_time_allCV = time.time()
model = GridSearchCV(svm.SVC(kernel='linear'), tuned_paras, cv=5, scoring='accuracy',fit_params={'sample_weight': Ball/pall})
model.fit(Xall, Aall)
time_allCV = time.time()-start_time_allCV
predAll_model = model.best_estimator_.predict(Xall)
acc_all_allCV = st.evalPred(predAll_model, Tall).acc()
predIndpt_model = model.best_estimator_.predict(Xindpt)
acc_indpt_allCV = st.evalPred(predIndpt_model, Tindpt).acc()
ts = st.tuneStat(Xall, Aall, Ball, m, uniqueIndex, dataLabel, model)
ts_allCV = ts.tsSSE(model='linear')
evf = st.EVF()
evf.evfCal(Xall, Aall, Ball, uniqueIndex, dataLabel, model)
evf_B_allCV = evf.evfSeq
############################################################################################################
## MICE imputation #########################################################################################
start_time_impCV = time.time()
acc_all_vec = np.zeros(num_imp)
acc_indpt_vec = np.zeros(num_imp)
        ts_vec = np.zeros(num_imp)
import os, shutil
import numpy as np
import time
import datetime
import torch
import torchvision
from torch import optim
from torch.autograd import Variable
import torch.nn.functional as F
from utils.mask_functions import write_txt
from models.network import U_Net, R2U_Net, AttU_Net, R2AttU_Net
from models.linknet import LinkNet34
from models.deeplabv3.deeplabv3plus import DeepLabV3Plus
import csv
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import seaborn as sns
import tqdm
from backboned_unet import Unet
from utils.loss import GetLoss, RobustFocalLoss2d, BCEDiceLoss, SoftBCEDiceLoss, SoftBceLoss, LovaszLoss
from torch.utils.tensorboard import SummaryWriter
import segmentation_models_pytorch as smp
from models.Transpose_unet.unet.model import Unet as Unet_t
from models.octave_unet.unet.model import OctaveUnet
import pandas as pd
class Train(object):
def __init__(self, config, train_loader, valid_loader):
# Data loader
self.train_loader = train_loader
self.valid_loader = valid_loader
# Models
self.unet = None
self.optimizer = None
self.img_ch = config.img_ch
self.output_ch = config.output_ch
self.criterion = SoftBCEDiceLoss(weight=[0.25, 0.75])
# self.criterion = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor(50))
self.criterion_stage2 = SoftBCEDiceLoss(weight=[0.25, 0.75])
self.criterion_stage3 = SoftBCEDiceLoss(weight=[0.25, 0.75])
self.model_type = config.model_type
self.t = config.t
self.mode = config.mode
self.resume = config.resume
# Hyper-parameters
self.lr = config.lr
self.lr_stage2 = config.lr_stage2
self.lr_stage3 = config.lr_stage3
self.start_epoch, self.max_dice = 0, 0
self.weight_decay = config.weight_decay
self.weight_decay_stage2 = config.weight_decay
self.weight_decay_stage3 = config.weight_decay
# save set
self.save_path = config.save_path
if 'choose_threshold' not in self.mode:
TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.datetime.now())
self.writer = SummaryWriter(log_dir=self.save_path+'/'+TIMESTAMP)
        # Configuration parameters
self.epoch_stage1 = config.epoch_stage1
self.epoch_stage1_freeze = config.epoch_stage1_freeze
self.epoch_stage2 = config.epoch_stage2
self.epoch_stage2_accumulation = config.epoch_stage2_accumulation
self.accumulation_steps = config.accumulation_steps
self.epoch_stage3 = config.epoch_stage3
self.epoch_stage3_accumulation = config.epoch_stage3_accumulation
        # Model initialization
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.build_model()
def build_model(self):
print("Using model: {}".format(self.model_type))
"""Build generator and discriminator."""
if self.model_type == 'U_Net':
self.unet = U_Net(img_ch=3, output_ch=self.output_ch)
elif self.model_type == 'R2U_Net':
self.unet = R2U_Net(img_ch=3, output_ch=self.output_ch, t=self.t)
elif self.model_type == 'AttU_Net':
self.unet = AttU_Net(img_ch=3, output_ch=self.output_ch)
elif self.model_type == 'R2AttU_Net':
self.unet = R2AttU_Net(img_ch=3, output_ch=self.output_ch, t=self.t)
elif self.model_type == 'unet_resnet34':
# self.unet = Unet(backbone_name='resnet34', pretrained=True, classes=self.output_ch)
self.unet = smp.Unet('resnet34', encoder_weights='imagenet', activation=None)
elif self.model_type == 'unet_resnet50':
self.unet = smp.Unet('resnet50', encoder_weights='imagenet', activation=None)
elif self.model_type == 'unet_se_resnext50_32x4d':
self.unet = smp.Unet('se_resnext50_32x4d', encoder_weights='imagenet', activation=None)
elif self.model_type == 'unet_densenet121':
self.unet = smp.Unet('densenet121', encoder_weights='imagenet', activation=None)
elif self.model_type == 'unet_resnet34_t':
self.unet = Unet_t('resnet34', encoder_weights='imagenet', activation=None, use_ConvTranspose2d=True)
elif self.model_type == 'unet_resnet34_oct':
self.unet = OctaveUnet('resnet34', encoder_weights='imagenet', activation=None)
elif self.model_type == 'linknet':
self.unet = LinkNet34(num_classes=self.output_ch)
elif self.model_type == 'deeplabv3plus':
self.unet = DeepLabV3Plus(model_backbone='res50_atrous', num_classes=self.output_ch)
elif self.model_type == 'pspnet_resnet34':
self.unet = smp.PSPNet('resnet34', encoder_weights='imagenet', classes=1, activation=None)
if torch.cuda.is_available():
self.unet = torch.nn.DataParallel(self.unet)
self.criterion = self.criterion.cuda()
self.criterion_stage2 = self.criterion_stage2.cuda()
self.criterion_stage3 = self.criterion_stage3.cuda()
self.unet.to(self.device)
def print_network(self, model, name):
"""Print out the network information."""
num_params = 0
for p in model.parameters():
num_params += p.numel()
print(model)
print(name)
print("The number of parameters: {}".format(num_params))
def reset_grad(self):
"""Zero the gradient buffers."""
self.unet.zero_grad()
def save_checkpoint(self, state, stage, index, is_best):
        # Save the weights once per epoch; if this is the best model so far, also copy them to the best-weight file; index distinguishes different cross-validation folds
pth_path = os.path.join(self.save_path, '%s_%d_%d.pth' % (self.model_type, stage, index))
torch.save(state, pth_path)
if is_best:
print('Saving Best Model.')
write_txt(self.save_path, 'Saving Best Model.')
shutil.copyfile(os.path.join(self.save_path, '%s_%d_%d.pth' % (self.model_type, stage, index)), os.path.join(self.save_path, '%s_%d_%d_best.pth' % (self.model_type, stage, index)))
def load_checkpoint(self, load_optimizer=True):
# Load the pretrained Encoder
weight_path = os.path.join(self.save_path, self.resume)
if os.path.isfile(weight_path):
checkpoint = torch.load(weight_path)
            # Load the model parameters, learning rate, optimizer state, starting epoch, minimum error, and so on
if torch.cuda.is_available:
self.unet.module.load_state_dict(checkpoint['state_dict'])
else:
self.unet.load_state_dict(checkpoint['state_dict'])
self.start_epoch = checkpoint['epoch']
self.max_dice = checkpoint['max_dice']
if load_optimizer:
self.lr = checkpoint['lr']
self.optimizer.load_state_dict(checkpoint['optimizer'])
print('%s is Successfully Loaded from %s' % (self.model_type, weight_path))
write_txt(self.save_path, '%s is Successfully Loaded from %s' % (self.model_type, weight_path))
else:
raise FileNotFoundError("Can not find weight file in {}".format(weight_path))
def train(self, index):
# self.optimizer = optim.Adam([{'params': self.unet.decoder.parameters(), 'lr': 1e-4}, {'params': self.unet.encoder.parameters(), 'lr': 1e-6},])
self.optimizer = optim.Adam(self.unet.module.parameters(), self.lr, weight_decay=self.weight_decay)
        # If training was interrupted partway, the previously trained parameters and the previous learning rate need to be loaded. TODO: the resumed learning rate is not carried over, so resume cannot be used for now
if self.resume:
self.load_checkpoint(load_optimizer=True)
'''
            CosineAnnealingLR: if ['initial_lr'] already exists, the decay starts from initial_lr;
            if it does not exist, running CosineAnnealingLR adds an initial_lr key (equal to lr) to optimizer.param_groups.
            Reset the initial learning rate: load_checkpoint restores the optimizer, but its initial_lr is still the old value, so it has to be overwritten with self.lr so that the decay starts from self.lr.
'''
self.optimizer.param_groups[0]['initial_lr'] = self.lr
stage1_epoches = self.epoch_stage1 - self.start_epoch
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, stage1_epoches+10)
        # Prevent the logs from being overwritten when training is resumed after an interruption
global_step_before = self.start_epoch*len(self.train_loader)
for epoch in range(self.start_epoch, self.epoch_stage1):
epoch += 1
self.unet.train(True)
            # Learning-rate restart
# if epoch == 30:
# self.optimizer.param_groups[0]['initial_lr'] = 0.0001
# lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, 25)
epoch_loss = 0
tbar = tqdm.tqdm(self.train_loader)
for i, (images, masks) in enumerate(tbar):
# GT : Ground Truth
images = images.to(self.device)
masks = masks.to(self.device)
# SR : Segmentation Result
net_output = self.unet(images)
net_output_flat = net_output.view(net_output.size(0), -1)
masks_flat = masks.view(masks.size(0), -1)
loss_set = self.criterion(net_output_flat, masks_flat)
try:
loss_num = len(loss_set)
except:
loss_num = 1
                # Handle the cases according to how many losses were returned
if loss_num > 1:
for loss_index, loss_item in enumerate(loss_set):
if loss_index > 0:
loss_name = 'stage1_loss_%d' % loss_index
self.writer.add_scalar(loss_name, loss_item.item(), global_step_before + i)
loss = loss_set[0]
else:
loss = loss_set
epoch_loss += loss.item()
# Backprop + optimize
self.reset_grad()
loss.backward()
self.optimizer.step()
params_groups_lr = str()
for group_ind, param_group in enumerate(self.optimizer.param_groups):
params_groups_lr = params_groups_lr + 'params_group_%d' % (group_ind) + ': %.12f, ' % (param_group['lr'])
                # Log to TensorBoard, one value per step
self.writer.add_scalar('Stage1_train_loss', loss.item(), global_step_before+i)
descript = "Train Loss: %.7f, lr: %s" % (loss.item(), params_groups_lr)
tbar.set_description(desc=descript)
            # Update global_step_before in preparation for the next epoch's iterations
global_step_before += len(tbar)
# Print the log info
print('Finish Stage1 Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.epoch_stage1, epoch_loss/len(tbar)))
write_txt(self.save_path, 'Finish Stage1 Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.epoch_stage1, epoch_loss/len(tbar)))
            # Validate the model, save the weights, and write the logs
loss_mean, dice_mean = self.validation(stage=1)
if dice_mean > self.max_dice:
is_best = True
self.max_dice = dice_mean
else: is_best = False
self.lr = lr_scheduler.get_lr()
state = {'epoch': epoch,
'state_dict': self.unet.module.state_dict(),
'max_dice': self.max_dice,
'optimizer' : self.optimizer.state_dict(),
'lr' : self.lr}
self.save_checkpoint(state, 1, index, is_best)
self.writer.add_scalar('Stage1_val_loss', loss_mean, epoch)
self.writer.add_scalar('Stage1_val_dice', dice_mean, epoch)
self.writer.add_scalar('Stage1_lr', self.lr[0], epoch)
            # Learning-rate decay
lr_scheduler.step()
def train_stage2(self, index):
        # # Freeze the BN layers, see https://zhuanlan.zhihu.com/p/65439075 and https://www.kaggle.com/c/siim-acr-pneumothorax-segmentation/discussion/100736591271 for more information
# def set_bn_eval(m):
# classname = m.__class__.__name__
# if classname.find('BatchNorm') != -1:
# m.eval()
# self.unet.apply(set_bn_eval)
# self.optimizer = optim.Adam([{'params': self.unet.decoder.parameters(), 'lr': 1e-5}, {'params': self.unet.encoder.parameters(), 'lr': 1e-7},])
self.optimizer = optim.Adam(self.unet.module.parameters(), self.lr_stage2, weight_decay=self.weight_decay_stage2)
        # Resuming covers two cases: stage 2 has not been trained yet and the stage-1 parameters should be loaded; or stage 2 was interrupted partway and training should continue
if self.resume:
            # If stage 2 was interrupted partway, reload it and continue TODO
if self.resume.split('_')[2] == '2':
                self.load_checkpoint(load_optimizer=True) # when load_optimizer is True, the learning rate and optimizer state are reloaded as well
'''
                CosineAnnealingLR: if ['initial_lr'] already exists, the decay starts from initial_lr;
                if it does not exist, running CosineAnnealingLR adds an initial_lr key (equal to lr) to optimizer.param_groups.
                Reset the initial learning rate: load_checkpoint restores the optimizer, but its initial_lr is still the old value, so it has to be overwritten with self.lr so that the decay starts from self.lr.
'''
self.optimizer.param_groups[0]['initial_lr'] = self.lr
            # Stage 1 finished but stage 2 did not start right away (there was a pause in between)
elif self.resume.split('_')[2] == '1':
self.load_checkpoint(load_optimizer=False)
self.start_epoch = 0
self.max_dice = 0
        # Stage 2 starts directly after stage 1, without any pause in between
else:
self.start_epoch = 0
self.max_dice = 0
        # Prevent the logs from being overwritten when training is resumed after an interruption
global_step_before = self.start_epoch*len(self.train_loader)
stage2_epoches = self.epoch_stage2 - self.start_epoch
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, stage2_epoches+5)
for epoch in range(self.start_epoch, self.epoch_stage2):
epoch += 1
self.unet.train(True)
epoch_loss = 0
            self.reset_grad() # needed when using gradient accumulation
tbar = tqdm.tqdm(self.train_loader)
for i, (images, masks) in enumerate(tbar):
# GT : Ground Truth
images = images.to(self.device)
masks = masks.to(self.device)
assert images.size(2) == 1024
# SR : Segmentation Result
net_output = self.unet(images)
net_output_flat = net_output.view(net_output.size(0), -1)
masks_flat = masks.view(masks.size(0), -1)
loss_set = self.criterion_stage2(net_output_flat, masks_flat)
try:
loss_num = len(loss_set)
except:
loss_num = 1
                # Handle the cases according to how many losses were returned
if loss_num > 1:
for loss_index, loss_item in enumerate(loss_set):
if loss_index > 0:
loss_name = 'stage2_loss_%d' % loss_index
self.writer.add_scalar(loss_name, loss_item.item(), global_step_before + i)
loss = loss_set[0]
else:
loss = loss_set
epoch_loss += loss.item()
# Backprop + optimize, see https://discuss.pytorch.org/t/why-do-we-need-to-set-the-gradients-manually-to-zero-in-pytorch/4903/20 for Accumulating Gradients
if epoch <= self.epoch_stage2 - self.epoch_stage2_accumulation:
self.reset_grad()
loss.backward()
self.optimizer.step()
else:
# loss = loss / self.accumulation_steps # Normalize our loss (if averaged)
loss.backward() # Backward pass
if (i+1) % self.accumulation_steps == 0: # Wait for several backward steps
self.optimizer.step() # Now we can do an optimizer step
self.reset_grad()
params_groups_lr = str()
for group_ind, param_group in enumerate(self.optimizer.param_groups):
params_groups_lr = params_groups_lr + 'params_group_%d' % (group_ind) + ': %.12f, ' % (param_group['lr'])
                # Log to TensorBoard, one value per step
self.writer.add_scalar('Stage2_train_loss', loss.item(), global_step_before+i)
descript = "Train Loss: %.7f, lr: %s" % (loss.item(), params_groups_lr)
tbar.set_description(desc=descript)
            # Update global_step_before in preparation for the next epoch's iterations
global_step_before += len(tbar)
# Print the log info
print('Finish Stage2 Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.epoch_stage2, epoch_loss/len(tbar)))
write_txt(self.save_path, 'Finish Stage2 Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.epoch_stage2, epoch_loss/len(tbar)))
            # Validate the model, save the weights, and write the logs
loss_mean, dice_mean = self.validation(stage=2)
if dice_mean > self.max_dice:
is_best = True
self.max_dice = dice_mean
else: is_best = False
self.lr = lr_scheduler.get_lr()
state = {'epoch': epoch,
'state_dict': self.unet.module.state_dict(),
'max_dice': self.max_dice,
'optimizer' : self.optimizer.state_dict(),
'lr' : self.lr}
self.save_checkpoint(state, 2, index, is_best)
self.writer.add_scalar('Stage2_val_loss', loss_mean, epoch)
self.writer.add_scalar('Stage2_val_dice', dice_mean, epoch)
self.writer.add_scalar('Stage2_lr', self.lr[0], epoch)
            # Learning-rate decay
lr_scheduler.step()
    # stage3: continues training from stage2, using only the samples that have a mask
def train_stage3(self, index):
        # # Freeze the BN layers, see https://zhuanlan.zhihu.com/p/65439075 and https://www.kaggle.com/c/siim-acr-pneumothorax-segmentation/discussion/100736591271 for more information
# def set_bn_eval(m):
# classname = m.__class__.__name__
# if classname.find('BatchNorm') != -1:
# m.eval()
# self.unet.apply(set_bn_eval)
# self.optimizer = optim.Adam([{'params': self.unet.decoder.parameters(), 'lr': 1e-5}, {'params': self.unet.encoder.parameters(), 'lr': 1e-7},])
self.optimizer = optim.Adam(self.unet.module.parameters(), self.lr_stage3, weight_decay=self.weight_decay_stage3)
        # For train_stage23, resume only takes effect in stage 2
if self.mode == 'train_stage23':
self.resume = None
        # Resuming covers two cases: stage 3 has not been trained yet and the stage-2 parameters should be loaded; or stage 3 was interrupted partway and training should continue
if self.resume:
            # If stage 3 was interrupted partway, reload it and continue TODO
if self.resume.split('_')[2] == '3':
                self.load_checkpoint(load_optimizer=True) # when load_optimizer is True, the learning rate and optimizer state are reloaded as well
'''
                CosineAnnealingLR: if ['initial_lr'] already exists, the decay starts from initial_lr;
                if it does not exist, running CosineAnnealingLR adds an initial_lr key (equal to lr) to optimizer.param_groups.
                Reset the initial learning rate: load_checkpoint restores the optimizer, but its initial_lr is still the old value, so it has to be overwritten with self.lr so that the decay starts from self.lr.
'''
self.optimizer.param_groups[0]['initial_lr'] = self.lr
            # Stage 2 finished but stage 3 did not start right away (there was a pause in between)
elif self.resume.split('_')[2] == '2':
self.load_checkpoint(load_optimizer=False)
self.start_epoch = 0
self.max_dice = 0
        # Stage 3 starts directly after stage 2, without any pause in between
else:
print('start stage3 after stage2 directly!')
self.start_epoch = 0
self.max_dice = 0
        # Prevent the logs from being overwritten when training is resumed after an interruption
global_step_before = self.start_epoch*len(self.train_loader)
stage3_epoches = self.epoch_stage3 - self.start_epoch
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, stage3_epoches+5)
for epoch in range(self.start_epoch, self.epoch_stage3):
epoch += 1
self.unet.train(True)
epoch_loss = 0
            self.reset_grad() # needed when using gradient accumulation
tbar = tqdm.tqdm(self.train_loader)
for i, (images, masks) in enumerate(tbar):
# GT : Ground Truth
images = images.to(self.device)
masks = masks.to(self.device)
assert images.size(2) == 1024
# SR : Segmentation Result
net_output = self.unet(images)
net_output_flat = net_output.view(net_output.size(0), -1)
masks_flat = masks.view(masks.size(0), -1)
loss_set = self.criterion_stage3(net_output_flat, masks_flat)
try:
loss_num = len(loss_set)
except:
loss_num = 1
                # Handle the cases according to how many losses were returned
if loss_num > 1:
for loss_index, loss_item in enumerate(loss_set):
if loss_index > 0:
loss_name = 'stage3_loss_%d' % loss_index
self.writer.add_scalar(loss_name, loss_item.item(), global_step_before + i)
loss = loss_set[0]
else:
loss = loss_set
epoch_loss += loss.item()
# Backprop + optimize, see https://discuss.pytorch.org/t/why-do-we-need-to-set-the-gradients-manually-to-zero-in-pytorch/4903/20 for Accumulating Gradients
if epoch <= self.epoch_stage3 - self.epoch_stage3_accumulation:
self.reset_grad()
loss.backward()
self.optimizer.step()
else:
# loss = loss / self.accumulation_steps # Normalize our loss (if averaged)
loss.backward() # Backward pass
if (i+1) % self.accumulation_steps == 0: # Wait for several backward steps
self.optimizer.step() # Now we can do an optimizer step
self.reset_grad()
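# Added illustration (not part of the original solver): gradient accumulation in a
# nutshell, assuming a hypothetical accumulation_steps of 4; gradients from 4 small
# batches are summed before one optimizer step, so the effective batch size is
# batch_size * accumulation_steps.
#
#     for step, (x, y) in enumerate(loader):
#         loss = criterion(model(x), y) / 4   # optional normalization by the step count
#         loss.backward()                     # gradients accumulate in param.grad
#         if (step + 1) % 4 == 0:
#             optimizer.step()
#             optimizer.zero_grad()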
params_groups_lr = str()
for group_ind, param_group in enumerate(self.optimizer.param_groups):
params_groups_lr = params_groups_lr + 'params_group_%d' % (group_ind) + ': %.12f, ' % (param_group['lr'])
# Log to TensorBoard, one value per step
self.writer.add_scalar('Stage3_train_loss', loss.item(), global_step_before+i)
descript = "Train Loss: %.7f, lr: %s" % (loss.item(), params_groups_lr)
tbar.set_description(desc=descript)
# Update global_step_before in preparation for the next epoch
global_step_before += len(tbar)
# Print the log info
print('Finish Stage3 Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.epoch_stage3, epoch_loss/len(tbar)))
write_txt(self.save_path, 'Finish Stage3 Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.epoch_stage3, epoch_loss/len(tbar)))
# Validate the model, save the weights and write the log
loss_mean, dice_mean = self.validation(stage=3)
if dice_mean > self.max_dice:
is_best = True
self.max_dice = dice_mean
else: is_best = False
self.lr = lr_scheduler.get_lr()
state = {'epoch': epoch,
'state_dict': self.unet.module.state_dict(),
'max_dice': self.max_dice,
'optimizer' : self.optimizer.state_dict(),
'lr' : self.lr}
self.save_checkpoint(state, 3, index, is_best)
self.writer.add_scalar('Stage3_val_loss', loss_mean, epoch)
self.writer.add_scalar('Stage3_val_dice', dice_mean, epoch)
self.writer.add_scalar('Stage3_lr', self.lr[0], epoch)
# Learning-rate decay
lr_scheduler.step()
def validation(self, stage=1):
# During validation, train(False) is required: it switches the BN layers, dropout, etc. into eval mode
# with torch.no_grad(): is optional; inside this context manager no gradients are tracked, which speeds things up and allows a larger batch size
self.unet.eval()
tbar = tqdm.tqdm(self.valid_loader)
loss_sum, dice_sum = 0, 0
if stage == 1:
criterion = self.criterion
elif stage == 2:
criterion = self.criterion_stage2
elif stage == 3:
criterion = self.criterion_stage3
with torch.no_grad():
for i, (images, masks) in enumerate(tbar):
images = images.to(self.device)
masks = masks.to(self.device)
net_output = self.unet(images)
net_output_flat = net_output.view(net_output.size(0), -1)
masks_flat = masks.view(masks.size(0), -1)
loss_set = criterion(net_output_flat, masks_flat)
try:
loss_num = len(loss_set)
except:
loss_num = 1
# Handle the cases according to how many losses were returned
if loss_num > 1:
loss = loss_set[0]
else:
loss = loss_set
loss_sum += loss.item()
# Compute the Dice coefficient; the predictions are passed through a sigmoid and then thresholded (the threshold defaults to 0.5)
net_output_flat_sign = (torch.sigmoid(net_output_flat)>0.5).float()
dice = self.dice_overall(net_output_flat_sign, masks_flat).mean()
dice_sum += dice.item()
descript = "Val Loss: {:.7f}, dice: {:.7f}".format(loss.item(), dice.item())
tbar.set_description(desc=descript)
loss_mean, dice_mean = loss_sum/len(tbar), dice_sum/len(tbar)
print("Val Loss: {:.7f}, dice: {:.7f}".format(loss_mean, dice_mean))
write_txt(self.save_path, "Val Loss: {:.7f}, dice: {:.7f}".format(loss_mean, dice_mean))
return loss_mean, dice_mean
# dice for threshold selection
def dice_overall(self, preds, targs):
n = preds.shape[0] # batch size
preds = preds.view(n, -1)
targs = targs.view(n, -1)
# preds, targs = preds.to(self.device), targs.to(self.device)
preds, targs = preds.cpu(), targs.cpu()
# Element-wise multiplication of the tensors gives the intersection of the two sets (only 1*1 equals 1). Summing over the second dimension then yields a tensor of size [batch size], where each value is the size of the intersection between the ground-truth and predicted labels for that image
intersect = (preds * targs).sum(-1).float()
# Element-wise addition of the tensors gives the union of the two sets. Summing over the second dimension then yields a tensor of size [batch size], where each value is the size of the union between the ground-truth and predicted labels for that image
union = (preds + targs).sum(-1).float()
'''
There are two cases in which the ground truth and the prediction have no overlap: first, neither the prediction nor the ground truth contains any positive pixel, so the union sum is 0; second, the ground truth has positive pixels but the prediction is completely wrong, in which case the union sum is not 0.
For the images whose union sum is 0, set the intersection to 1 and the union to 2, so that 2*intersection/union equals 1;
in every other case compute 2*intersection/union directly; since the union above does not subtract the intersection, the numerator uses 2*intersection, and the maximum value is 1
'''
u0 = union == 0
intersect[u0] = 1
union[u0] = 2
return (2. * intersect / union)
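# Added worked example (not part of the original code), assuming a batch of two
# 4-pixel masks run through dice_overall:
#
#     preds = torch.tensor([[1., 1., 0., 0.],   # 2 predicted positives
#                           [0., 0., 0., 0.]])  # empty prediction
#     targs = torch.tensor([[1., 0., 0., 0.],   # 1 true positive
#                           [0., 0., 0., 0.]])  # empty ground truth
#     # sample 1: intersect=1, union=3 -> dice = 2*1/3 = 0.667
#     # sample 2: union==0 -> intersect forced to 1, union to 2 -> dice = 1.0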
def classify_score(self, preds, targs):
'''If the current image contains a mask it is treated as a positive sample, otherwise as a negative one. Score the current accuracy from a classification point of view.
Args:
preds: predicted mask tensor
targs: ground-truth mask tensor
Return: classification accuracy
'''
n = preds.shape[0] # batch size
preds = preds.view(n, -1)
targs = targs.view(n, -1)
# preds, targs = preds.to(self.device), targs.to(self.device)
preds_, targs_ = torch.sum(preds, 1), torch.sum(targs, 1)
preds_, targs_ = preds_ > 0, targs_ > 0
preds_, targs_ = preds_.cpu(), targs_.cpu()
score = torch.sum(preds_ == targs_)
return score.item()/n
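# Added worked example (not part of the original code): classify_score treats
# "any positive pixel" as the positive class, so with the hypothetical batch
#
#     preds = torch.tensor([[1., 0.], [0., 0.], [1., 1.]])
#     targs = torch.tensor([[0., 1.], [0., 0.], [1., 0.]])
#
# the per-image presence flags are pred=[True, False, True] and targ=[True, False, True];
# all three agree, so the returned accuracy is 3/3 = 1.0.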
def choose_threshold(self, model_path, index):
'''Search for the model's best probability threshold and best pixel threshold with a linear sweep; the best threshold is found in a coarse pass followed by a fine pass, then the best pixel threshold is searched; the search curves are saved as a figure.
Args:
model_path: path to the current model weights
index: index of the current fold
Return: best threshold, best pixel threshold, best score
'''
self.unet.module.load_state_dict(torch.load(model_path)['state_dict'])
stage = eval(model_path.split('/')[-1].split('_')[2])
print('Loaded from %s, using choose_threshold!' % model_path)
self.unet.eval()
with torch.no_grad():
# First, roughly narrow down the threshold range
dices_big = []
thrs_big = np.arange(0.1, 1, 0.1) # list of candidate thresholds
for th in thrs_big:
tmp = []
tbar = tqdm.tqdm(self.valid_loader)
for i, (images, masks) in enumerate(tbar):
# GT : Ground Truth
images = images.to(self.device)
net_output = torch.sigmoid(self.unet(images))
preds = (net_output > th).to(self.device).float() # pixels above the threshold become 1
# preds[preds.view(preds.shape[0],-1).sum(-1) < noise_th,...] = 0.0 # filter out noisy predictions
tmp.append(self.dice_overall(preds, masks).mean())
# tmp.append(self.classify_score(preds, masks))
dices_big.append(sum(tmp) / len(tmp))
dices_big = np.array(dices_big)
best_thrs_big = thrs_big[dices_big.argmax()]
# Fine-grained search around the coarse optimum
dices_little = []
thrs_little = np.arange(best_thrs_big-0.05, best_thrs_big+0.05, 0.01) # list of candidate thresholds
for th in thrs_little:
tmp = []
tbar = tqdm.tqdm(self.valid_loader)
for i, (images, masks) in enumerate(tbar):
# GT : Ground Truth
images = images.to(self.device)
net_output = torch.sigmoid(self.unet(images))
preds = (net_output > th).to(self.device).float() # pixels above the threshold become 1
# preds[preds.view(preds.shape[0],-1).sum(-1) < noise_th,...] = 0.0 # filter out noisy predictions
tmp.append(self.dice_overall(preds, masks).mean())
# tmp.append(self.classify_score(preds, masks))
dices_little.append(sum(tmp) / len(tmp))
dices_little = np.array(dices_little)
# score = dices.max()
best_thr = thrs_little[dices_little.argmax()]
# Select the best pixel threshold
if stage != 3:
dices_pixel = []
pixel_thrs = np.arange(0, 2304, 256) # list of candidate pixel thresholds
for pixel_thr in pixel_thrs:
tmp = []
tbar = tqdm.tqdm(self.valid_loader)
for i, (images, masks) in enumerate(tbar):
# GT : Ground Truth
images = images.to(self.device)
net_output = torch.sigmoid(self.unet(images))
preds = (net_output > best_thr).to(self.device).float() # pixels above the threshold become 1
preds[preds.view(preds.shape[0],-1).sum(-1) < pixel_thr,...] = 0.0 # filter out noisy predictions
tmp.append(self.dice_overall(preds, masks).mean())
# tmp.append(self.classify_score(preds, masks))
dices_pixel.append(sum(tmp) / len(tmp))
dices_pixel = np.array(dices_pixel)
score = dices_pixel.max()
best_pixel_thr = pixel_thrs[dices_pixel.argmax()]
elif stage == 3:
best_pixel_thr, score = 0, dices_little.max()
print('best_thr:{}, best_pixel_thr:{}, score:{}'.format(best_thr, best_pixel_thr, score))
plt.figure(figsize=(10.4, 4.8))
plt.subplot(1, 3, 1)
plt.title('Large-scale search')
plt.plot(thrs_big, dices_big)
plt.subplot(1, 3, 2)
plt.title('Fine-scale search')
plt.plot(thrs_little, dices_little)
plt.subplot(1, 3, 3)
plt.title('pixel thrs search')
if stage != 3:
plt.plot(pixel_thrs, dices_pixel)
plt.savefig(os.path.join(self.save_path, 'stage{}'.format(stage)+'_fold'+str(index)))
# plt.show()
plt.close()
return float(best_thr), float(best_pixel_thr), float(score)
def pred_mask_count(self, model_path, masks_bool, val_index, best_thr, best_pixel_thr):
'''Load the model and, given the best threshold and best pixel threshold, compute the classification accuracy on the validation set. Intended for the second training stage: after the thresholds have been chosen with the Dice score, check the classification accuracy.
Args:
model_path: path to the current model weights
masks_bool: whether each sample in the full dataset contains a mask
val_index: indices of the current validation set within the full dataset
best_thr: the selected best threshold
best_pixel_thr: the selected best pixel threshold
Return: None; prints how many positive samples exist in the ground truth and how many were actually predicted. This is only indicative, since it does not amount to an accuracy.
'''
count_true, count_pred = 0,0
for index1 in val_index:
if masks_bool[index1]:
count_true += 1
self.unet.module.load_state_dict(torch.load(model_path)['state_dict'])
print('Loaded from %s' % model_path)
self.unet.eval()
with torch.no_grad():
tmp = []
tbar = tqdm.tqdm(self.valid_loader)
for i, (images, masks) in enumerate(tbar):
# GT : Ground Truth
images = images.to(self.device)
net_output = torch.sigmoid(self.unet(images))
preds = (net_output > best_thr).to(self.device).float() # pixels above the threshold become 1
preds[preds.view(preds.shape[0],-1).sum(-1) < best_pixel_thr,...] = 0.0 # filter out noisy predictions
n = preds.shape[0] # batch size
preds = preds.view(n, -1)
for index2 in range(n):
pred = preds[index2, ...]
if torch.sum(pred) > 0:
count_pred += 1
tmp.append(self.dice_overall(preds, masks).mean())
print('score:', sum(tmp) / len(tmp))
print('count_true:{}, count_pred:{}'.format(count_true, count_pred))
def grid_search(self, thrs_big, pixel_thrs):
'''Search for the best threshold and the best pixel threshold with a grid search
Args:
thrs_big: the list of candidate thresholds for the grid search
pixel_thrs: the list of candidate pixel thresholds for the grid search
Return: best threshold, best pixel threshold, best score, and the score at every position of the grid
'''
with torch.no_grad():
# First, roughly narrow down the threshold and pixel-threshold ranges
dices_big = [] # 2-D matrix: each row holds the scores of all pixel thresholds for one threshold
for th in thrs_big:
dices_pixel = []
for pixel_thr in pixel_thrs:
tmp = []
tbar = tqdm.tqdm(self.valid_loader)
for i, (images, masks) in enumerate(tbar):
# GT : Ground Truth
images = images.to(self.device)
net_output = torch.sigmoid(self.unet(images))
preds = (net_output > th).to(self.device).float() # pixels above the threshold become 1
preds[preds.view(preds.shape[0],-1).sum(-1) < pixel_thr,...] = 0.0 # filter out noisy predictions
tmp.append(self.dice_overall(preds, masks).mean())
# tmp.append(self.classify_score(preds, masks))
dices_pixel.append(sum(tmp) / len(tmp))
dices_big.append(dices_pixel)
dices_big = np.array(dices_big)
print('Coarse search over thresholds and pixel thresholds, dices_big shape: {}'.format(np.shape(dices_big)))
re = np.where(dices_big == np.max(dices_big))
# Handle the case where the maximum is attained at several positions
if np.shape(re)[1] != 1:
re = re[0]
best_thrs_big, best_pixel_thr = thrs_big[int(re[0])], pixel_thrs[int(re[1])]
best_thr, score = best_thrs_big, dices_big.max()
return best_thr, best_pixel_thr, score, dices_big
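# Added usage sketch (not part of the original code): grid_search evaluates every
# (threshold, pixel-threshold) pair on the validation loader; with the hypothetical
# grids below it fills a 5x4 score matrix and returns the position of its maximum.
# `solver` stands for an already initialised instance of this class.
#
#     thrs = np.arange(0.6, 0.85, 0.05)       # 5 probability thresholds
#     pixel_thrs = np.arange(0, 1024, 256)    # 4 minimum-area thresholds
#     best_thr, best_pixel_thr, score, grid = solver.grid_search(thrs, pixel_thrs)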
def choose_threshold_grid(self, model_path, index):
'''Search for the model's best threshold and best pixel threshold with a grid search, split into a coarse pass and a fine pass; the heatmaps are saved.
Args:
model_path: path to the current model weights
index: index of the current fold
Return: best threshold, best pixel threshold, best score
'''
self.unet.module.load_state_dict(torch.load(model_path)['state_dict'])
stage = eval(model_path.split('/')[-1].split('_')[2])
print('Loaded from %s, using choose_threshold_grid!' % model_path)
self.unet.eval()
thrs_big1 = np.arange(0.60, 0.81, 0.015) # list of candidate thresholds
pixel_thrs1 = np.arange(768, 2305, 256) # list of candidate pixel thresholds
best_thr1, best_pixel_thr1, score1, dices_big1 = self.grid_search(thrs_big1, pixel_thrs1)
print('best_thr1:{}, best_pixel_thr1:{}, score1:{}'.format(best_thr1, best_pixel_thr1, score1))
thrs_big2 = np.arange(best_thr1-0.015, best_thr1+0.015, 0.0075) # list of candidate thresholds
pixel_thrs2 = np.arange(best_pixel_thr1-256, best_pixel_thr1+257, 128) # list of candidate pixel thresholds
best_thr2, best_pixel_thr2, score2, dices_big2 = self.grid_search(thrs_big2, pixel_thrs2)
print('best_thr2:{}, best_pixel_thr2:{}, score2:{}'.format(best_thr2, best_pixel_thr2, score2))
if score1 < score2: best_thr, best_pixel_thr, score, dices_big = best_thr2, best_pixel_thr2, score2, dices_big2
else: best_thr, best_pixel_thr, score, dices_big = best_thr1, best_pixel_thr1, score1, dices_big1
print('best_thr:{}, best_pixel_thr:{}, score:{}'.format(best_thr, best_pixel_thr, score))
f, (ax1, ax2) = plt.subplots(figsize=(14.4, 4.8), ncols=2)
cmap = sns.cubehelix_palette(start = 1.5, rot = 3, gamma=0.8, as_cmap = True)
data1 = pd.DataFrame(data=dices_big1, index=np.around(thrs_big1, 3), columns=pixel_thrs1)
sns.heatmap(data1, linewidths = 0.05, ax = ax1, vmax=np.max(dices_big1), vmin=np.min(dices_big1)
# -*- coding: utf-8 -*-
"""
Dataset for Mask R-CNN
Configurations and data loading code for COCO format.
@author: <NAME>
"""
import os
import sys
import time
import numpy as np
import json
# Download and install the Python coco tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools import mask as maskUtils
from mrcnn import model as modellib, utils
############################################################
# Dataset
############################################################
class TrashDataset(utils.Dataset):
def load_trash(self, data_dir, anno_file):
print("Loading Trash Data:" + str(data_dir) + "" + str(anno_file))
trash = COCO(os.path.join(data_dir, anno_file))
# Add classes
class_ids = sorted(trash.getCatIds())
for i in class_ids:
self.add_class("trash", i, trash.loadCats(i)[0]["name"])
# Add images
image_ids = list(trash.imgs.keys())
for i in image_ids:
current_annotation = []
for a in trash.loadAnns(trash.getAnnIds()):
if a["image_id"] == i:
current_annotation = a
self.add_image(
"trash", image_id=i,
path=os.path.join(data_dir, trash.imgs[i]['file_name']),
width=trash.imgs[i]["width"],
height=trash.imgs[i]["height"],
annotations=current_annotation) # annotations=[a for a in trash.loadAnns(trash.getAnnIds()) if a['image_id'] == str(i)]
return trash
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. This
function converts the different mask format to one format
in the form of a bitmap [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
image_info = self.image_info[image_id]
instance_masks = []
class_ids = []
annotation = image_info["annotations"]
if len(annotation) > 0:
# Build mask of shape [height, width, instance_count] and list
# of class IDs that correspond to each channel of the mask.
# for annotation in annotations:
class_id = self.map_source_class_id(
"trash.{}".format(annotation["category_id"]))
if class_id:
m = self.annToMask(annotation, image_info["height"],
image_info["width"])
# Some objects are so small that they're less than 1 pixel area
# and end up rounded out; only keep objects with a non-empty mask.
if m.max() > 0:
# Is it a crowd? If so, use a negative class ID.
if annotation['iscrowd']:
# Use negative class ID for crowds
class_id *= -1
# For crowd masks, annToMask() sometimes returns a mask
# smaller than the given dimensions. If so, resize it.
if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]:
m = np.ones([image_info["height"], image_info["width"]], dtype=bool)
instance_masks.append(m)
class_ids.append(class_id)
# Pack instance masks into an array
if len(class_ids) > 0:
mask = np.stack(instance_masks, axis=2).astype(np.bool)
class_ids = np.array(class_ids, dtype=np.int32)
"""Use the QC'd 12z 24 Hour files to adjust hourly data."""
import sys
import os
import datetime
import numpy as np
from scipy.interpolate import NearestNDInterpolator
import pygrib
from pyiem import iemre
from pyiem.util import ncopen, logger, utc
LOG = logger()
def merge(ts):
"""
Process an hour's worth of stage4 data into the hourly RE
"""
# Load up the 12z 24h total, this is what we base our deltas on
fn = ("/mesonet/ARCHIVE/data/%s/stage4/ST4.%s.24h.grib"
) % (ts.strftime("%Y/%m/%d"), ts.strftime("%Y%m%d%H"))
if not os.path.isfile(fn):
LOG.info("stage4_12z_adjust %s is missing", fn)
return False
grbs = pygrib.open(fn)
grb = grbs[1]
val = grb.values
lats, lons = grb.latlons()
# can save a bit of memory as we don't need all data
stride = slice(None, None, 3)
lats = np.ravel(lats[stride, stride])
lons = np.ravel(lons[stride, stride])
vals = np.ravel(val[stride, stride])
# Clip large values
vals = np.where(vals > 250., 0, vals)
nn = NearestNDInterpolator((lons, lats), vals)
xi, yi = np.meshgrid(iemre.XAXIS, iemre.YAXIS)
stage4 = nn(xi, yi)
# Prevent Large numbers, negative numbers
stage4 = np.where(stage4 < 10000., stage4, 0.)
stage4 = np.where(stage4 < 0., 0., stage4)
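# Added note (not part of the original script): np.where(cond, a, b) keeps `a`
# where the condition holds and substitutes `b` elsewhere, so the first line above
# zeroes out unphysical totals of 10000 or more and the second clamps negatives to
# zero. For example:
#
#     np.where(np.array([5., -1., 20000.]) < 10000., np.array([5., -1., 20000.]), 0.)
#     # -> array([ 5., -1.,  0.])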
ts0 = ts - datetime.timedelta(days=1)
offset0 = iemre.hourly_offset(ts0)
offset1 = iemre.hourly_offset(ts)
# Running at 12 UTC 1 Jan
if offset0 > offset1:
offset0 = 0
# Open up our RE file
with ncopen(iemre.get_hourly_ncname(ts.year), 'a', timeout=300) as nc:
iemre_total = np.sum(
nc.variables["p01m"][offset0:offset1, :, :], axis=0)
iemre_total = np.where(iemre_total > 0., iemre_total, 0.00024)
iemre_total = np.where(iemre_total < 10000., iemre_total, 0.00024)
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import os
import cv2
import torch
import tqdm
from iopath.common.file_io import g_pathmgr
import slowfast.utils.checkpoint as cu
import slowfast.utils.logging as logging
from slowfast.datasets.ava_helper import parse_bboxes_file
from slowfast.datasets.cv2_transform import scale, scale_boxes
from slowfast.datasets.utils import get_sequence
from slowfast.models import build_model
from slowfast.utils import misc
from slowfast.visualization.utils import process_cv2_inputs
# from slowfast.visualization.video_visualizer import VideoVisualizer
logger = logging.get_logger(__name__)
class AVAVisualizerWithPrecomputedBox:
"""
Visualize action predictions for videos or folder of images with precomputed
and ground-truth boxes in AVA format.
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
self.source = g_pathmgr.get_local_path(path=cfg.DEMO.INPUT_VIDEO)
self.fps = None
if g_pathmgr.isdir(self.source):
self.fps = cfg.DEMO.FPS
self.video_name = self.source.split("/")[-1]
self.source = os.path.join(
self.source, "{}_%06d.jpg".format(self.video_name)
)
else:
self.video_name = self.source.split("/")[-1]
self.video_name = self.video_name.split(".")[0]
self.cfg = cfg
self.cap = cv2.VideoCapture(self.source)
if self.fps is None:
self.fps = self.cap.get(cv2.CAP_PROP_FPS)
self.total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.display_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.display_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
if not self.cap.isOpened():
raise IOError("Video {} cannot be opened".format(self.source))
self.output_file = None
if cfg.DEMO.OUTPUT_FILE != "":
self.output_file = self.get_output_file(cfg.DEMO.OUTPUT_FILE)
self.pred_boxes, self.gt_boxes = load_boxes_labels(
cfg,
self.video_name,
self.fps,
self.display_width,
self.display_height,
)
self.seq_length = cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE
self.no_frames_repeat = cfg.DEMO.SLOWMO
def get_output_file(self, path):
"""
Return a video writer object.
Args:
path (str): path to the output video file.
"""
return cv2.VideoWriter(
filename=path,
fourcc=cv2.VideoWriter_fourcc(*"mp4v"),
fps=float(30),
frameSize=(self.display_width, self.display_height),
isColor=True,
)
def get_input_clip(self, keyframe_idx):
"""
Get input clip from the video/folder of images for a given
keyframe index.
Args:
keyframe_idx (int): index of the current keyframe.
Returns:
clip (list of tensors): formatted input clip(s) corresponding to
the current keyframe.
"""
seq = get_sequence(
keyframe_idx,
self.seq_length // 2,
self.cfg.DATA.SAMPLING_RATE,
self.total_frames,
)
clip = []
for frame_idx in seq:
self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
was_read, frame = self.cap.read()
if was_read:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = scale(self.cfg.DATA.TEST_CROP_SIZE, frame)
clip.append(frame)
else:
logger.error(
"Unable to read frame. Duplicating previous frame."
)
clip.append(clip[-1])
clip = process_cv2_inputs(clip, self.cfg)
return clip
def get_predictions(self):
"""
Predict and append prediction results to each box in each keyframe in
`self.pred_boxes` dictionary.
"""
# Set random seed from configs.
np.random.seed(self.cfg.RNG_SEED)
torch.manual_seed(self.cfg.RNG_SEED)
# Setup logging format.
logging.setup_logging(self.cfg.OUTPUT_DIR)
# Print config.
logger.info("Run demo with config:")
logger.info(self.cfg)
assert (
self.cfg.NUM_GPUS <= 1
), "Cannot run demo visualization on multiple GPUs."
# Build the video model and print model statistics.
model = build_model(self.cfg)
model.eval()
logger.info("Start loading model info")
misc.log_model_info(model, self.cfg, use_train_input=False)
logger.info("Start loading model weights")
cu.load_test_checkpoint(self.cfg, model)
logger.info("Finish loading model weights")
logger.info("Start making predictions for precomputed boxes.")
for keyframe_idx, boxes_and_labels in tqdm.tqdm(
self.pred_boxes.items()
):
inputs = self.get_input_clip(keyframe_idx)
boxes = boxes_and_labels[0]
boxes = torch.from_numpy(np.array(boxes)).float()
box_transformed = scale_boxes(
self.cfg.DATA.TEST_CROP_SIZE,
boxes,
self.display_height,
self.display_width,
)
# Pad frame index for each box.
box_inputs = torch.cat(
[
torch.full((box_transformed.shape[0], 1), float(0)),
box_transformed,
],
axis=1,
)
if self.cfg.NUM_GPUS:
# Transfer the data to the current GPU device.
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
box_inputs = box_inputs.cuda()
preds = model(inputs, box_inputs)
preds = preds.detach()
if self.cfg.NUM_GPUS:
preds = preds.cpu()
boxes_and_labels[1] = preds
# def draw_video(self):
# """
# Draw predicted and ground-truth (if provided) results on the video/folder of images.
# Write the visualized result to a video output file.
# """
# all_boxes = merge_pred_gt_boxes(self.pred_boxes, self.gt_boxes)
# common_classes = (
# self.cfg.DEMO.COMMON_CLASS_NAMES
# if len(self.cfg.DEMO.LABEL_FILE_PATH) != 0
# else None
# )
# video_vis = VideoVisualizer(
# num_classes=self.cfg.MODEL.NUM_CLASSES,
# class_names_path=self.cfg.DEMO.LABEL_FILE_PATH,
# top_k=self.cfg.TENSORBOARD.MODEL_VIS.TOPK_PREDS,
# thres=self.cfg.DEMO.COMMON_CLASS_THRES,
# lower_thres=self.cfg.DEMO.UNCOMMON_CLASS_THRES,
# common_class_names=common_classes,
# colormap=self.cfg.TENSORBOARD.MODEL_VIS.COLORMAP,
# mode=self.cfg.DEMO.VIS_MODE,
# )
# all_keys = sorted(all_boxes.keys())
# # Draw around the keyframe for 2/10 of the sequence length.
# # This is chosen using heuristics.
# draw_range = [
# self.seq_length // 2 - self.seq_length // 10,
# self.seq_length // 2 + self.seq_length // 10,
# ]
# draw_range_repeat = [
# draw_range[0],
# (draw_range[1] - draw_range[0]) * self.no_frames_repeat
# + draw_range[0],
# ]
# prev_buffer = []
# prev_end_idx = 0
# logger.info("Start Visualization...")
# for keyframe_idx in tqdm.tqdm(all_keys):
# pred_gt_boxes = all_boxes[keyframe_idx]
# # Find the starting index of the clip. If start_idx exceeds the beginning
# # of the video, we only choose valid frame from index 0.
# start_idx = max(0, keyframe_idx - self.seq_length // 2)
# # Number of frames from the start of the current clip and the
# # end of the previous clip.
# dist = start_idx - prev_end_idx
# # If there are unwritten frames in between clips.
# if dist >= 0:
# # Get the frames in between previous clip and current clip.
# frames = self._get_frame_range(prev_end_idx, dist)
# # We keep a buffer of frames for overlapping visualization.
# # Write these to the output file.
# for frame in prev_buffer:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# self.display(frame)
# # Write them to output file without any visualization
# # since they don't have any corresponding keyframes.
# for frame in frames:
# self.display(frame)
# prev_buffer = []
# num_new_frames = self.seq_length
# # If there are overlapping frames in between clips.
# elif dist < 0:
# # Flush all ready frames.
# for frame in prev_buffer[:dist]:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# self.display(frame)
# prev_buffer = prev_buffer[dist:]
# num_new_frames = self.seq_length + dist
# # Obtain new frames for the current clip from the input video file.
# new_frames = self._get_frame_range(
# max(start_idx, prev_end_idx), num_new_frames
# )
# new_frames = [
# cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for frame in new_frames
# ]
# clip = prev_buffer + new_frames
# # Calculate the end of this clip. This will be `prev_end_idx` for the
# # next iteration.
# prev_end_idx = max(start_idx, prev_end_idx) + len(new_frames)
# # For each precomputed or gt boxes.
# for i, boxes in enumerate(pred_gt_boxes):
# if i == 0:
# repeat = self.no_frames_repeat
# current_draw_range = draw_range
# else:
# repeat = 1
# current_draw_range = draw_range_repeat
# # Make sure draw range does not fall out of end of clip.
# current_draw_range[1] = min(
# current_draw_range[1], len(clip) - 1
# )
# ground_truth = boxes[0]
# bboxes = boxes[1]
# label = boxes[2]
# # Draw predictions.
# clip = video_vis.draw_clip_range(
# clip,
# label,
# bboxes=torch.Tensor(bboxes),
# ground_truth=ground_truth,
# draw_range=current_draw_range,
# repeat_frame=repeat,
# )
# # Store the current clip as buffer.
# prev_buffer = clip
# # Write the remaining buffer to output file.
# for frame in prev_buffer:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# self.display(frame)
# # If we still have some remaining frames in the input file,
# # write those to the output file as well.
# if prev_end_idx < self.total_frames:
# dist = self.total_frames - prev_end_idx
# remaining_clip = self._get_frame_range(prev_end_idx, dist)
# for frame in remaining_clip:
# self.display(frame)
def __call__(self):
self.get_predictions()
self.draw_video()
def display(self, frame):
"""
Either display a single frame (BGR image) to a window or write to
an output file if output path is provided.
"""
if self.output_file is None:
cv2.imshow("SlowFast", frame)
else:
self.output_file.write(frame)
def _get_keyframe_clip(self, keyframe_idx):
"""
Return a clip corresponding to a keyframe index for visualization.
Args:
keyframe_idx (int): keyframe index.
"""
start_idx = max(0, keyframe_idx - self.seq_length // 2)
clip = self._get_frame_range(start_idx, self.seq_length)
return clip
def _get_frame_range(self, start_idx, num_frames):
"""
Return a clip of `num_frames` frames starting from `start_idx`. If not enough frames
from `start_idx`, return the remaining frames from `start_idx`.
Args:
start_idx (int): starting idx.
num_frames (int): number of frames in the returned clip.
"""
was_read = True
assert start_idx < self.total_frames, "Start index out of range."
self.cap.set(cv2.CAP_PROP_POS_FRAMES, start_idx)
all_frames = []
for _ in range(num_frames):
was_read, frame = self.cap.read()
if was_read:
all_frames.append(frame)
else:
break
return all_frames
def merge_pred_gt_boxes(pred_dict, gt_dict=None):
"""
Merge data from precomputed and ground-truth boxes dictionaries.
Args:
pred_dict (dict): a dict which maps from `frame_idx` to a list of `boxes`
and `labels`. Each `box` is a list of 4 box coordinates. `labels[i]` is
a list of labels for `boxes[i]`.
gt_dict (Optional[dict]): a dict which maps from `frame_idx` to a list of `boxes`
and `labels`. Each `box` is a list of 4 box coordinates. `labels[i]` is
a list of labels for `boxes[i]`. Note that label is -1 for predicted boxes.
Returns:
merged_dict (dict): merged dictionary from `pred_dict` and `gt_dict` if given.
It is a dict which maps from `frame_idx` to a list of [`is_gt`, `boxes`, `labels`],
where `is_gt` is a boolean indicate whether the `boxes` and `labels` are ground-truth.
"""
merged_dict = {}
for key, item in pred_dict.items():
merged_dict[key] = [[False, item[0], item[1]]]
if gt_dict is not None:
for key, item in gt_dict.items():
if merged_dict.get(key) is None:
merged_dict[key] = [[True, item[0], item[1]]]
else:
merged_dict[key].append([True, item[0], item[1]])
return merged_dict
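# Added illustration (not part of the original file): shape of the merged dictionary
# for a hypothetical keyframe 30 that has one predicted and one ground-truth box:
#
#     pred = {30: [[[0.1, 0.2, 0.5, 0.6]], [[-1]]]}
#     gt   = {30: [[[0.1, 0.2, 0.5, 0.6]], [[7]]]}
#     merge_pred_gt_boxes(pred, gt)
#     # -> {30: [[False, [[0.1, 0.2, 0.5, 0.6]], [[-1]]],
#     #          [True,  [[0.1, 0.2, 0.5, 0.6]], [[7]]]]}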
def load_boxes_labels(cfg, video_name, fps, img_width, img_height):
"""
Loading boxes and labels from AVA bounding boxes csv files.
Args:
cfg (CfgNode): config.
video_name (str): name of the given video.
fps (int or float): frames per second of the input video/images folder.
img_width (int): width of images in input video/images folder.
img_height (int): height of images in input video/images folder.
Returns:
preds_boxes (dict): a dict which maps from `frame_idx` to a list of `boxes`
and `labels`. Each `box` is a list of 4 box coordinates. `labels[i]` is
a list of labels for `boxes[i]`. Note that label is -1 for predicted boxes.
gt_boxes (dict): if cfg.DEMO.GT_BOXES is given, return similar dict as
all_pred_boxes but for ground-truth boxes.
"""
starting_second = cfg.DEMO.STARTING_SECOND
def sec_to_frameidx(sec):
return (sec - starting_second) * fps
def process_bboxes_dict(dictionary):
"""
Replace all `keyframe_sec` in `dictionary` with `keyframe_idx` and
merge all [`box_coordinate`, `box_labels`] pairs into
[`all_boxes_coordinates`, `all_boxes_labels`] for each `keyframe_idx`.
Args:
dictionary (dict): a dictionary which maps `frame_sec` to a list of `box`.
Each `box` is a [`box_coord`, `box_labels`] where `box_coord` is the
coordinates of box and 'box_labels` are the corresponding
labels for the box.
Returns:
new_dict (dict): a dict which maps from `frame_idx` to a list of `boxes`
and `labels`. Each `box` in `boxes` is a list of 4 box coordinates. `labels[i]`
is a list of labels for `boxes[i]`. Note that label is -1 for predicted boxes.
"""
# Replace all keyframe_sec with keyframe_idx.
new_dict = {}
for keyframe_sec, boxes_and_labels in dictionary.items():
# Ignore keyframes with no boxes
if len(boxes_and_labels) == 0:
continue
keyframe_idx = sec_to_frameidx(keyframe_sec)
boxes, labels = list(zip(*boxes_and_labels))
# Shift labels from [1, n_classes] to [0, n_classes - 1].
labels = [[i - 1 for i in box_label] for box_label in labels]
boxes = np.array(boxes)
"""
Code for figure 2A.
Visualize a 3 block SBM with
fixed background fields.
<NAME>, Dec 2019.
"""
import numpy as np
import networkx as nx
import random
import matplotlib.pyplot as plt
import three_block_sbm_class as ThreeBlock
def plot_three_block(sbm_graph,three_block,fname="three_block_plot",color_on='background_field',label=None) :
N1 = N2 = N3 = 400
pos_block_1 = [ [ np.random.uniform(0,1) , np.random.uniform(0,1.0)
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 16 16:13:39 2021
@author: ruizca
"""
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord, FK5
from astropy.table import Table, unique, join
from astropy.utils.console import color_print
from astropy_healpix import HEALPix
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize
from matplotlib.patches import Polygon
from mocpy import MOC
from mocpy.mocpy import flatten_pixels
from scipy.stats import median_abs_deviation
from tqdm.auto import tqdm
from .. import rapidxmm
from .ecf import ECF
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
#plt.rc('text', usetex=True)
plt.rcParams['mathtext.fontset'] = "stix"
plt.rcParams['mathtext.rm'] = "STIXGeneral"
plt.rcParams['font.family'] = "STIXGeneral"
plt.rcParams["axes.formatter.use_mathtext"] = True
# Numpy random number generator
rng = np.random.default_rng()
def get_neighbours(npixel, hp, level=5):
# The central pixel is the first one
# The output of hp.neighbours always follows the
# same order, starting SW and rotating clockwise
neighbours_level = [None] * (level + 1)
neighbours_level[0] = [npixel]
npixel_neighbours = [npixel]
for i in range(1, level + 1):
neighbours_level[i] = hp.neighbours(neighbours_level[i - 1]).flatten()
npixel_neighbours += list(neighbours_level[i])
sorted_neighbours = Table()
sorted_neighbours["npixel"] = npixel_neighbours
sorted_neighbours["order"] = range(len(npixel_neighbours))
sorted_neighbours = unique(sorted_neighbours, keys=["npixel"])
sorted_neighbours.sort("order")
return sorted_neighbours
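# Added note (not part of the original module): hp.neighbours() returns the 8
# pixels surrounding each input pixel, so each level grows the search region by one
# ring of neighbours; duplicates are dropped by unique() while the "order" column
# preserves the discovery order (central pixel first). A minimal, hypothetical call:
#
#     hp = HEALPix(nside=2**10, order="nested", frame=FK5())
#     neighbours = get_neighbours(npixel=12345, hp=hp, level=2)
#     neighbours["npixel"][0]   # == 12345, the central pixel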
def get_bkg_npixels(src_center, nside, npixels=100):
order = np.log2(nside).astype(int)
bkg_moc_outer = MOC.from_cone(src_center.ra, src_center.dec, 120*u.arcsec, order)
bkg_moc_inner = MOC.from_cone(src_center.ra, src_center.dec, 60*u.arcsec, order)
bkg_moc = bkg_moc_outer.difference(bkg_moc_inner)
bkg_npixels = flatten_pixels(bkg_moc._interval_set._intervals, order)
return rng.choice(bkg_npixels, size=npixels, replace=False).tolist()
def get_bkg_data(npixel, obsid, hp):
src_center = hp.healpix_to_skycoord(npixel)
bkg_npixels = get_bkg_npixels(src_center, hp.nside, npixels=100)
bkg_data = rapidxmm.query_npixels(
bkg_npixels, obstype="pointed", instrum="PN"
)
mask = bkg_data["obsid"] == obsid
bkg_data = bkg_data[mask]
if len(bkg_data) < 15:
bkg_data = None
return bkg_data
def stats_bootstrap(src, bkg, exp, eef, ecf, ac=None, nbkg=None, nsim=1000):
# Calculate median and MAD for the stack using bootstraping
nstack, npixels, nbands = src.shape
cr = np.zeros((nsim, npixels, nbands))
cr_err = np.zeros((nsim, npixels, nbands))
snr = np.zeros((nsim, npixels, nbands))
texp = np.zeros((nsim, npixels, nbands))
ecf_sample = np.zeros((nsim, nbands))
# msrc = np.zeros((nsim, npixels, nbands))
# mbkg = np.zeros((nsim, npixels, nbands))
# mexp = np.zeros((nsim, npixels, nbands))
for i in range(nsim):
idx_sample = np.random.randint(nstack, size=nstack)
S = np.sum(src[idx_sample, :, :], axis=0)
B = np.sum(bkg[idx_sample, :, :], axis=0)
t = np.sum(exp[idx_sample, :, :], axis=0)
if ac is None:
Bcorr = np.sum(bkg[idx_sample, :, :] / nbkg[idx_sample, :, :], axis=0)
ac = np.ones_like(bkg)
else:
Bcorr = np.sum(ac[idx_sample, :, :] * bkg[idx_sample, :, :], axis=0)
cr[i, :, :] = (
np.sum(src[idx_sample, :, :] / eef[idx_sample, :, :], axis=0) -
np.sum(bkg[idx_sample, :, :] / eef[idx_sample, :, :], axis=0)
) / t
cr_err[i, :, :] = np.sqrt(
np.sum(src[idx_sample, :, :] / eef[idx_sample, :, :]**2, axis=0) +
np.sum(ac[idx_sample, :, :] * bkg[idx_sample, :, :] / eef[idx_sample, :, :]**2, axis=0)
) / t
snr[i, :, :] = (S - B) / np.sqrt(S + Bcorr)
#snr[i, :, :] = cr[i, :, :] / cr_err[i, :, :]
ecf_sample[i, :] = np.mean(ecf[idx_sample, :], axis=0)
# msrc[i, :, :] = np.sum(src[idx_sample, :, :], axis=0)
# mbkg[i, :, :] = np.sum(bkg[idx_sample, :, :], axis=0)
# mexp[i, :, :] = np.sum(exp[idx_sample, :, :], axis=0)
texp[i, :, :] = t
cr_median = np.nanmedian(cr, axis=0)
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on October 28, 2015
"""
from __future__ import division, print_function, unicode_literals, absolute_import
from PostProcessorInterfaceBaseClass import PostProcessorInterfaceBase, CheckInterfacePP
import os
import numpy as np
from scipy import interpolate
import copy
from utils import InputData, InputTypes
class dataObjectLabelFilter(PostProcessorInterfaceBase):
"""
This Post-Processor filters out the points or histories accordingly to a chosen clustering label
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
inputSpecification = super().getInputSpecification()
inputSpecification.setCheckClass(CheckInterfacePP("dataObjectLabelFilter"))
DOLFDataTypeType = InputTypes.makeEnumType("DOLFDataType", "DOLFDataTypeType", ['HistorySet','PointSet'])
inputSpecification.addSubSimple("dataType", DOLFDataTypeType)
inputSpecification.addSubSimple("label", InputTypes.StringType)
inputSpecification.addSubSimple("clusterIDs", InputTypes.IntegerListType)
#Should method be in super class?
inputSpecification.addSubSimple("method", contentType=InputTypes.StringType)
return inputSpecification
def initialize(self):
"""
Method to initialize the Interfaced Post-processor
@ In, None,
@ Out, None,
"""
PostProcessorInterfaceBase.initialize(self)
self.inputFormat = None
self.outputFormat = None
self.label = None
self.clusterIDs = []
def _handleInput(self, paramInput):
"""
Function to handle the parameter input.
@ In, paramInput, ParameterInput, the already parsed input.
@ Out, None
"""
for child in paramInput.subparts:
if child.getName() == 'dataType':
dataType = child.value
if dataType in set(['HistorySet','PointSet']):
self.inputFormat = dataType
self.outputFormat = dataType
else:
self.raiseAnError(IOError, 'dataObjectLabelFilter Interfaced Post-Processor ' + str(self.name) + ' : dataType ' + str(dataType) + ' is not recognized (available are HistorySet, PointSet)')
elif child.getName() == 'label':
self.label = child.value
elif child.getName() == 'clusterIDs':
for clusterID in child.value:
self.clusterIDs.append(clusterID)
elif child.getName() !='method':
self.raiseAnError(IOError, 'dataObjectLabelFilter Interfaced Post-Processor ' + str(self.name) + ' : XML node ' + str(child) + ' is not recognized')
def run(self,inputDic):
"""
Method to post-process the dataObjects
@ In, inputDic, list, list of dictionaries which contains the data inside the input DataObjects
@ Out, outputDic, dictionary, output dictionary to be provided to the base class
"""
if len(inputDic)>1:
self.raiseAnError(IOError, 'HistorySetSync Interfaced Post-Processor ' + str(self.name) + ' accepts only one dataObject')
else:
inputDict = inputDic[0]
outputDict = {}
outputDict['data'] ={}
outputDict['dims'] = {}
outputDict['metadata'] = copy.deepcopy(inputDict['metadata']) if 'metadata' in inputDict.keys() else {}
labelType = type(inputDict['data'][self.label][0])
if labelType != np.ndarray:
indexes = np.where(np.in1d(inputDict['data'][self.label],self.clusterIDs))[0]
for key in inputDict['data'].keys():
outputDict['data'][key] = inputDict['data'][key][indexes]
outputDict['dims'][key] = []
else:
for key in inputDict['data'].keys():
if type(inputDict['data'][key][0]) == np.ndarray:
temp = []
for cnt in range(len(inputDict['data'][self.label])):
indexes = np.where(np.in1d(inputDict['data'][self.label][cnt],self.clusterIDs))[0]
if len(indexes) > 0:
temp.append(copy.deepcopy(inputDict['data'][key][cnt][indexes]))
outputDict['data'][key] = np.asanyarray(temp)
"""
Computes functional-connectivity gradients in four configurations:
LL & RR intra-hemispheric and LR & RL inter-hemispheric.
Using the Cole-Anticevic network parcellation, we compute the gradient scores
in each network for all four configurations. We run a one-sample t-test
on the intra-hemispheric and inter-hemispheric gradient-score differences in
each network (LL-RR differences and LR-RL differences).
"""
import os
import pandas as pd
import numpy as np
from scipy import stats
from brainspace.gradient import GradientMaps
import statistics
codedir = os.path.dirname(os.path.abspath(__file__))
datadir = os.path.join(os.path.dirname(codedir),
'data/data_results/FC/')
graddir = os.path.join(os.path.dirname(codedir),
'data/data_results/gradient/')
path = os.path.join(datadir, 'LL')
path_list = os.listdir(path)
path_list.sort()
# compute the group-level functional connectivity (FC) matrix
# for intra-hemispheric (LL, RR) and inter-hemispheric (LR, RL) fashions
n = len(path_list)
matrix_fc_LL = [None] * len(path_list)
matrix_fc_RR = [None] * len(path_list)
matrix_fc_RL = [None] * len(path_list)
matrix_fc_LR = [None] * len(path_list)
matrix_fc_LLRR = [None] * len(path_list)
matrix_fc_LRRL = [None] * len(path_list)
total_fc_LL = 0
total_fc_RR = 0
total_fc_RL = 0
total_fc_LR = 0
total_fc_LLRR = 0
total_fc_LRRL = 0
total_llrr = 0
total_lrrl = 0
for i in range(n):
matrix_fc_LL[i] = np.array(pd.read_csv(os.path.join(datadir, 'LL',
path_list[i]), header=None))
total_fc_LL += matrix_fc_LL[i]
matrix_fc_RR[i] = np.array(pd.read_csv(os.path.join(datadir, 'RR',
path_list[i]), header=None))
total_fc_RR += matrix_fc_RR[i]
matrix_fc_LR[i] = np.array(pd.read_csv(os.path.join(datadir, 'LR',
path_list[i]), header=None))
total_fc_LR += matrix_fc_LR[i]
matrix_fc_RL[i] = np.array(pd.read_csv(os.path.join(datadir, 'RL',
path_list[i]), header=None))
total_fc_RL += matrix_fc_RL[i]
matrix_fc_LLRR[i] = np.array(pd.read_csv(os.path.join(datadir, 'LL-RR',
path_list[i]), header=None))
total_fc_LLRR += matrix_fc_LLRR[i]
matrix_fc_LRRL[i] = np.array(pd.read_csv(os.path.join(datadir, 'LR-RL',
path_list[i]), header=None))
total_fc_LRRL += matrix_fc_LRRL[i]
mean_fc_LL = total_fc_LL/n
mean_fc_RR = total_fc_RR/n
mean_fc_RL = total_fc_RL/n
mean_fc_LR = total_fc_LR/n
mean_fc_LLRR = total_fc_LLRR/n
mean_fc_LRRL = total_fc_LRRL/n
np.savetxt(os.path.join(datadir, 'LL_groupmean.csv'), mean_fc_LL,
delimiter = ',')
np.savetxt(os.path.join(datadir, 'RR_groupmean.csv'), mean_fc_RR,
delimiter = ',')
np.savetxt(os.path.join(datadir, 'LR_groupmean.csv'), mean_fc_LR,
delimiter = ',')
np.savetxt(os.path.join(datadir, 'RL_groupmean.csv'), mean_fc_RL,
delimiter = ',')
np.savetxt(os.path.join(datadir, 'LLRR_groupmean.csv'), mean_fc_LLRR,
delimiter = ',')
np.savetxt(os.path.join(datadir, 'LRRL_groupmean.csv'), mean_fc_LRRL,
delimiter = ',')
print('Done! fc matrix computation across all subjects finished...')
# get the gradients of group-level FC for left-left (LL) connections
gm = GradientMaps(n_components=10, random_state=0, approach='dm',
kernel='normalized_angle')
LL = np.array(pd.read_csv(os.path.join(datadir, 'LL_groupmean.csv'),
header=None))
gm.fit(LL)
group_grad_LL = gm.gradients_
np.savetxt(os.path.join(graddir, 'group_grad_LL.csv'), group_grad_LL,
delimiter = ',')
np.savetxt(os.path.join(graddir, 'group_grad_LL_lambdas.csv'), gm.lambdas_,
delimiter = ',')
# get the gradients for each subject and each FC (LL, RR, LR, RL) and
# align them to the group-level LL gradients
for i in path_list:
# FC LL
align = GradientMaps(n_components=10, random_state=0, approach='dm',
kernel='normalized_angle', alignment='procrustes')
fc_LL = np.array(pd.read_csv(os.path.join(datadir, 'LL', i), header=None))
align.fit(fc_LL, reference=group_grad_LL)
grad_LL = align.gradients_
np.savetxt(os.path.join(graddir, 'LL', i), grad_LL, delimiter = ',')
# FC RR
align = GradientMaps(n_components=10, random_state=0, approach='dm',
kernel='normalized_angle', alignment='procrustes')
fc_RR = np.array(pd.read_csv(os.path.join(datadir, 'RR', i), header=None))
align.fit(fc_RR,reference=group_grad_LL)
grad_RR = align.gradients_
np.savetxt(os.path.join(graddir, 'RR', i), grad_RR, delimiter = ',')
# FC LR
align = GradientMaps(n_components=10, random_state=0, approach='dm',
kernel='normalized_angle', alignment='procrustes')
fc_LR = np.array(pd.read_csv(os.path.join(datadir, 'LR', i), header=None))
align.fit(fc_LR,reference=group_grad_LL)
grad_LR = align.gradients_
np.savetxt(os.path.join(graddir, 'LR', i), grad_LR, delimiter = ',')
# FC RL
align = GradientMaps(n_components=10, random_state=0, approach='dm',
kernel='normalized_angle', alignment='procrustes')
fc_RL = np.array(pd.read_csv(os.path.join(datadir, 'RL', i), header=None))
align.fit(fc_RL,reference=group_grad_LL)
grad_RL = align.gradients_
np.savetxt(os.path.join(graddir, 'RL', i), grad_RL, delimiter = ',')
print('finish ' + i)
# quality check: get the correlations between individual gradients and
# the group-level template gradient, swap the axis if negative
cons = ['LL', 'RR', 'LR', 'RL']
for dir in path_list:
for con in cons:
print(dir, con)
df = np.array(pd.read_csv(os.path.join(graddir, con, dir), header=None))
r = [None] * 10
corrected = [None]*10
for i in range(10):
r[i] = stats.pearsonr(group_grad_LL[:,i],df[:,i])
if r[i][0] > 0:
corrected[i]=df[:,i]
else:
corrected[i]=-1*df[:,i]
m = i +1
correct = open(os.path.join(graddir, 'corrected.txt'), 'a')
correct.write(str(con) + ' ' + dir + ' ' + str(m) +'\n')
correct.close()
corrected_x = np.concatenate((corrected[0].reshape(corrected[0].shape[0],1),
corrected[1].reshape(corrected[1].shape[0],1),
corrected[2].reshape(corrected[2].shape[0],1),
corrected[3].reshape(corrected[3].shape[0],1),
corrected[4].reshape(corrected[4].shape[0],1),
corrected[5].reshape(corrected[5].shape[0],1),
corrected[6].reshape(corrected[6].shape[0],1),
corrected[7].reshape(corrected[7].shape[0],1),
corrected[8].reshape(corrected[8].shape[0],1),
corrected[9].reshape(corrected[9].shape[0],1))
,axis = 1)
np.savetxt(os.path.join(graddir, con, dir), corrected_x, delimiter=',')
if con == 'LL':
corrected_ll = corrected_x
elif con == 'RR':
corrected_rr = corrected_x
elif con == 'LR':
corrected_lr = corrected_x
elif con == 'RL':
corrected_rl = corrected_x
AI_llrr = corrected_ll - corrected_rr
AI_lrrl = corrected_lr - corrected_rl
np.savetxt(os.path.join(graddir, 'LL-RR', dir), AI_llrr, delimiter = ',')
np.savetxt(os.path.join(graddir, 'LR-RL', dir), AI_lrrl, delimiter = ',')
# mean AI
total_llrr = total_llrr + AI_llrr
total_lrrl = total_lrrl + AI_lrrl
mean_llrr = total_llrr/len(path_list)
mean_lrrl = total_lrrl/len(path_list)
np.savetxt(os.path.join(graddir, 'mean_asym_LLRR.csv'), mean_llrr,
delimiter = ',')
np.savetxt(os.path.join(graddir, 'mean_asym_LRRL.csv'), mean_lrrl,
delimiter = ',')
# read Cole-Anticevic network parcels for left and right hemispheres,
# for each subject, we parcellate the gradients into 12 networks,
# for intra-hemispheric (LL, RR) and inter-hemispheric (LR, RL) differences,
# we subtract the mean gradient scores in each network
cadir= os.path.join(os.path.dirname(codedir), 'data')
ca_l = np.array(pd.read_csv(os.path.join(cadir, 'ca_glasser_network.csv'),
header=None))[:,0][:180]
ca_r = np.array(pd.read_csv(os.path.join(cadir, 'ca_glasser_network.csv'),
header=None))[:,0][180:]
for n in range(len(path_list)):
ll = np.array(pd.read_csv(os.path.join(graddir,'LL',path_list[n]),header=None))
rr = np.array(pd.read_csv(os.path.join(graddir,'RR',path_list[n]),header=None))
lr = np.array(pd.read_csv(os.path.join(graddir,'LR',path_list[n]),header=None))
rl = np.array(pd.read_csv(os.path.join(graddir,'RL',path_list[n]),header=None))
# intra-hemisphere
intra = [None] * 3
for i in range(3):
intra[i] = (statistics.mean(ll[:,i][np.where(ca_l==1)])-statistics.mean(rr[:,i][np.where(ca_r==1)]),
statistics.mean(ll[:,i][np.where(ca_l==2)])-statistics.mean(rr[:,i][np.where(ca_r==2)]),
statistics.mean(ll[:,i][np.where(ca_l==3)])-statistics.mean(rr[:,i][np.where(ca_r==3)]),
statistics.mean(ll[:,i][np.where(ca_l==4)])-statistics.mean(rr[:,i][np.where(ca_r==4)]),
statistics.mean(ll[:,i][np.where(ca_l==5)])-statistics.mean(rr[:,i][np.where(ca_r==5)]),
statistics.mean(ll[:,i][np.where(ca_l==6)])-statistics.mean(rr[:,i][np.where(ca_r==6)]),
statistics.mean(ll[:,i][np.where(ca_l==7)])-statistics.mean(rr[:,i][np.where(ca_r==7)]),
statistics.mean(ll[:,i][np.where(ca_l==8)])-statistics.mean(rr[:,i][np.where(ca_r==8)]),
statistics.mean(ll[:,i][np.where(ca_l==9)])-statistics.mean(rr[:,i][np.where(ca_r==9)]),
statistics.mean(ll[:,i][np.where(ca_l==10)])-statistics.mean(rr[:,i][np.where(ca_r==10)]),
statistics.mean(ll[:,i][np.where(ca_l==11)])-statistics.mean(rr[:,i][np.where(ca_r==11)]),
statistics.mean(ll[:,i][np.where(ca_l==12)])-statistics.mean(rr[:,i][np.where(ca_r==12)]))
np.savetxt(os.path.join(graddir, 'intra_ca', path_list[n]),
np.array(intra).T, delimiter = ',')
# inter-hemisphere
inter = [None] * 3
for i in range(3):
inter[i] = (statistics.mean(lr[:,i][np.where(ca_l==1)])-statistics.mean(rl[:,i][np.where(ca_r==1)]),
statistics.mean(lr[:,i][np.where(ca_l==2)])-statistics.mean(rl[:,i][np.where(ca_r==2)]),
statistics.mean(lr[:,i][np.where(ca_l==3)])-statistics.mean(rl[:,i][np.where(ca_r==3)]),
statistics.mean(lr[:,i][np.where(ca_l==4)])-statistics.mean(rl[:,i][np.where(ca_r==4)]),
statistics.mean(lr[:,i][np.where(ca_l==5)])-statistics.mean(rl[:,i][np.where(ca_r==5)]),
statistics.mean(lr[:,i][np.where(ca_l==6)])-statistics.mean(rl[:,i][np.where(ca_r==6)]),
statistics.mean(lr[:,i][np.where(ca_l==7)])-statistics.mean(rl[:,i][np.where(ca_r==7)]),
statistics.mean(lr[:,i][np.where(ca_l==8)])-statistics.mean(rl[:,i][np.where(ca_r==8)]),
statistics.mean(lr[:,i][np.where(ca_l==9)])-statistics.mean(rl[:,i][np.where(ca_r==9)]),
statistics.mean(lr[:,i][np.where(ca_l==10)])-statistics.mean(rl[:,i][np.where(ca_r==10)]),
statistics.mean(lr[:,i][np.where(ca_l==11)])-statistics.mean(rl[:,i][np.where(ca_r==11)]),
statistics.mean(lr[:,i][np.where(ca_l==12)])-statistics.mean(rl[:,i][np.where(ca_r==12)]))
np.savetxt(os.path.join(graddir, 'inter_ca', path_list[n]),
np.array(inter).T, delimiter = ',')
# Stats: implement one-sample t-test on intra-hemispheric (LL, RR) and
# inter-hemispheric (LR, RL) gradient score differences
# for each parcel (1,...,180) and for each gradient (1,2,3) separately
intra_g1 = [None] * len(path_list) # diff between LL and RR gradient 1
intra_g2 = [None] * len(path_list)
intra_g3 = [None] * len(path_list)
inter_g1 = [None] * len(path_list) # diff between LR and RL gradient 1
inter_g2 = [None] * len(path_list)
inter_g3 = [None] * len(path_list)
for n in range(len(path_list)):
intra_g1[n] = np.array(pd.read_csv(os.path.join(graddir, 'LL-RR', path_list[n]), header=None))[:,0]
intra_g2[n] = np.array(pd.read_csv(os.path.join(graddir, 'LL-RR', path_list[n]), header=None))[:,1]
intra_g3[n] = np.array(pd.read_csv(os.path.join(graddir, 'LL-RR', path_list[n]), header=None))[:,2]
inter_g1[n] = np.array(pd.read_csv(os.path.join(graddir, 'LR-RL', path_list[n]), header=None))[:,0]
inter_g2[n] = np.array(pd.read_csv(os.path.join(graddir, 'LR-RL', path_list[n]), header=None))[:,1]
inter_g3[n] = np.array(pd.read_csv(os.path.join(graddir, 'LR-RL', path_list[n]), header=None))[:,2]
intra_g1_stats = [None] * 180
intra_g2_stats = [None] * 180
intra_g3_stats = [None] * 180
inter_g1_stats = [None] * 180
inter_g2_stats = [None] * 180
inter_g3_stats = [None] * 180
for i in range(180):
intra_g1_stats[i] = stats.ttest_1samp(np.array(intra_g1)[:,i],0)
intra_g2_stats[i] = stats.ttest_1samp(np.array(intra_g2)[:,i],0)
intra_g3_stats[i] = stats.ttest_1samp(np.array(intra_g3)[:,i],0)
inter_g1_stats[i] = stats.ttest_1samp(np.array(inter_g1)[:,i],0)
inter_g2_stats[i] = stats.ttest_1samp(np.array(inter_g2)[:,i],0)
inter_g3_stats[i] = stats.ttest_1samp(np.array(inter_g3)[:,i],0)
pd.DataFrame(np.array(intra_g1_stats)).to_csv(os.path.join(graddir, 'intra_g1_stats.csv' ))
pd.DataFrame(np.array(intra_g2_stats)).to_csv(os.path.join(graddir, 'intra_g2_stats.csv' ))
pd.DataFrame(np.array(intra_g3_stats)).to_csv(os.path.join(graddir, 'intra_g3_stats.csv' ))
pd.DataFrame(np.array(inter_g1_stats)).to_csv(os.path.join(graddir, 'inter_g1_stats.csv' ))
pd.DataFrame(np.array(inter_g2_stats)).to_csv(os.path.join(graddir, 'inter_g2_stats.csv' ))
pd.DataFrame(np.array(inter_g3_stats)).to_csv(os.path.join(graddir, 'inter_g3_stats.csv' ))
# FDR correction on the p-values
def fdr(p_vals):
from scipy.stats import rankdata
ranked_p_values = rankdata(p_vals)
fdr = p_vals * len(p_vals) / ranked_p_values
fdr[fdr > 1] = 1
return fdr
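# Added worked example (not part of the original script): this is the
# Benjamini-Hochberg style adjustment p * n / rank, capped at 1 (without the
# monotonicity step of the full step-up procedure). For instance:
#
#     p = np.array([0.01, 0.04, 0.03, 0.20])
#     fdr(p)  # ranks are [1, 3, 2, 4] -> array([0.04, 0.0533..., 0.06, 0.20])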
pd.DataFrame(np.vstack((fdr(np.array(intra_g1_stats)[:,1]),
fdr(np.array(intra_g2_stats)
""" MCR Main Class for Computation"""
import sys as _sys
import numpy as _np
import logging as _logging
from pymcr.regressors import OLS, NNLS
from pymcr.constraints import ConstraintNonneg
from pymcr.metrics import mse
# create logger for mcr.py and set default level
_logger = _logging.getLogger(__name__)
_logger.setLevel(_logging.INFO)
class McrAR:
"""
Multivariate Curve Resolution - Alternating Regression
D = CS^T
Parameters
----------
c_regr : str, class
Instantiated regression class (or string, see Notes) for calculating
the C matrix
st_regr : str, class
Instantiated regression class (or string, see Notes) for calculating
the S^T matrix
fit_kwargs : dict
kwargs sent to fit and fit_transform methods
c_fit_kwargs : dict
kwargs sent to c_regr.fit method
st_fit_kwargs : dict
kwargs sent to st_regr.fit method
c_constraints : list
List of constraints applied to calculation of C matrix
st_constraints : list
List of constraints applied to calculation of S^T matrix
max_iter : int
Maximum number of iterations. One iteration calculates both C and S^T
err_fcn : function
Function to calculate error/differences after each least squares
calculation (ie twice per iteration). Outputs to err attribute.
tol_increase : float
Factor increase to allow in err attribute. Set to 0 for no increase
allowed. E.g., setting to 1.0 means the err can double per iteration.
tol_n_increase : int
Number of consecutive iterations for which the err attribute can
increase
tol_err_change : float
If err changes less than tol_err_change, per iteration, break.
tol_n_above_min : int
Number of half-iterations that can be performed without reaching a
new error-minimum
Attributes
----------
err : list
List of calculated errors (from err_fcn) after each least squares (ie
twice per iteration)
C_ : ndarray [n_samples, n_targets]
Most recently calculated C matrix (that did not cause a tolerance
failure)
ST_ : ndarray [n_targets, n_features]
Most recently calculated S^T matrix (that did not cause a tolerance
failure)
components_ : ndarray [n_targets, n_features]
Synonym for ST_, providing sklearn like compatibility
C_opt_ : ndarray [n_samples, n_targets]
[Optimal] C matrix for lowest err attribute
ST_opt_ : ndarray [n_targets, n_features]
[Optimal] ST matrix for lowest err attribute
n_iter : int
Total number of iterations performed
n_features : int
Total number of features, e.g. spectral frequencies.
n_samples : int
Total number of samples (e.g., pixels)
n_targets : int
Total number of targets (e.g., pure analytes)
n_iter_opt : int
Iteration when optimal C and ST calculated
exit_max_iter_reached : bool
Exited iterations due to maximum number of iteration reached (max_iter
parameter)
exit_tol_increase : bool
Exited iterations due to maximum fractional increase in error metric
(via err_fcn)
exit_tol_n_increase : bool
Exited iterations due to maximum number of consecutive increases in
error metric (via err fcn)
exit_tol_err_change : bool
Exited iterations due to error metric change that is smaller than
tol_err_change
exit_tol_n_above_min : bool
Exited iterations due to maximum number of half-iterations for which
the error metric increased above the minimum error
Notes
-----
- Built-in regressor classes (str can be used): OLS (ordinary least
squares), NNLS (non-negatively constrained least squares). See
mcr.regressors.
- Built-in regressor methods can be given as a string to c_regr, st_regr;
though instantiating an imported class gives more flexibility.
- Setting any tolerance to None turns that check off
"""
def __init__(self, c_regr=OLS(), st_regr=OLS(), fit_kwargs={},
c_fit_kwargs={}, st_fit_kwargs={}, c_constraints=[ConstraintNonneg()],
st_constraints=[ConstraintNonneg()],
max_iter=50, err_fcn=mse,
tol_increase=0.0, tol_n_increase=10, tol_err_change=None,
tol_n_above_min=10
):
"""
Multivariate Curve Resolution - Alternating Regression
"""
self.fit_kwargs = fit_kwargs
self.max_iter = max_iter
self.tol_increase = tol_increase
self.tol_n_increase = tol_n_increase
self.tol_err_change = tol_err_change
self.tol_n_above_min = tol_n_above_min
self.err_fcn = err_fcn
self.err = None
self.c_constraints = c_constraints
self.st_constraints = st_constraints
self.c_regressor = self._check_regr(c_regr)
self.st_regressor = self._check_regr(st_regr)
self.c_fit_kwargs = c_fit_kwargs
self.st_fit_kwargs = st_fit_kwargs
self.C_ = None
self.ST_ = None
self.C_opt_ = None
self.ST_opt_ = None
self.n_iter_opt = None
self.n_iter = None
self.n_increase = None
self.n_above_min = None
self.exit_max_iter_reached = False
self.exit_tol_increase = False
self.exit_tol_n_increase = False
self.exit_tol_err_change = False
self.exit_tol_n_above_min = False
# Saving every C or S^T matrix at each iteration
# Could create huge memory usage
self._saveall_st = False
self._saveall_c = False
self._saved_st = []
self._saved_c = []
def _check_regr(self, mth):
"""
Check regressor method. If acceptable strings, instantiate and
return object. If instantiated class, make sure it has a fit
attribute.
"""
if isinstance(mth, str):
if mth.upper() == 'OLS':
return OLS()
elif mth.upper() == 'NNLS':
return NNLS()
else:
raise ValueError('{} is unknown. Use NNLS or OLS.'.format(mth))
elif hasattr(mth, 'fit'):
return mth
else:
raise ValueError('Input class '
'{} does not have a \'fit\' method'.format(mth))
@property
def D_(self):
""" D matrix with current C and S^T matrices """
return _np.dot(self.C_, self.ST_)
@property
def D_opt_(self):
""" D matrix with optimal C and S^T matrices """
return _np.dot(self.C_opt_, self.ST_opt_)
@property
def n_features(self):
""" Number of features """
if self.ST_ is not None:
return self.ST_.shape[-1]
else:
return None
@property
def n_targets(self):
""" Number of targets """
if self.C_ is not None:
return self.C_.shape[1]
else:
return None
@property
def n_samples(self):
""" Number of samples """
if self.C_ is not None:
return self.C_.shape[0]
else:
return None
def _ismin_err(self, val):
""" Is the current error the minimum """
if len(self.err) == 0:
return True
else:
return ([val > x for x in self.err].count(True) == 0)
def fit(self, D, C=None, ST=None, st_fix=None, c_fix=None, c_first=True,
verbose=False, post_iter_fcn=None, post_half_fcn=None):
"""
Perform MCR-AR. D = CS^T. Solve for C and S^T iteratively.
Parameters
----------
D : ndarray
D matrix
C : ndarray
Initial C matrix estimate. Only provide initial C OR S^T.
ST : ndarray
Initial S^T matrix estimate. Only provide initial C OR S^T.
st_fix : list
The spectral component numbers to keep fixed.
c_fix : list
The concentration component numbers to keep fixed.
c_first : bool
Calculate C first when both C and ST are provided. c_fix and st_fix
must also be provided in this circumstance.
verbose : bool
Log iteration and per-least squares err results. See Notes.
post_iter_fcn : function
Function to perform after each iteration
post_half_fcn : function
Function to perform after half-iteration
Notes
-----
        - Parameters to fit will SUPERSEDE anything in fit_kwargs, if provided during McrAR
instantiation.
- Note that providing C (or ST) to fit_kwargs and providing ST (or C) to fit or
fit_transform will raise an error.
- When in doubt, clear fit_kwargs via self.fit_kwargs = {}
- Does not affect verbose or c_first parameters
- pyMCR (>= 0.3.1) uses the native Python logging module
rather than print statements; thus, to see the messages, one will
need to log-to-file or stream to stdout. More info is available in
the docs.
"""
D = _np.asanyarray(D)
if verbose:
_logger.setLevel(_logging.DEBUG)
else:
_logger.setLevel(_logging.INFO)
if self.fit_kwargs:
temp = self.fit_kwargs.get('C')
if (temp is not None) & (C is None):
C = temp
temp = self.fit_kwargs.get('ST')
if (temp is not None) & (ST is None):
ST = temp
temp = self.fit_kwargs.get('st_fix')
if (temp is not None) & (st_fix is None):
st_fix = temp
temp = self.fit_kwargs.get('c_fix')
if (temp is not None) & (c_fix is None):
c_fix = temp
temp = self.fit_kwargs.get('post_iter_fcn')
if (temp is not None) & (post_iter_fcn is None):
post_iter_fcn = temp
temp = self.fit_kwargs.get('post_half_fcn')
            if (temp is not None) & (post_half_fcn is None):
                post_half_fcn = temp
# Ensure only C or ST provided
if (C is None) & (ST is None):
raise TypeError('C or ST estimate must be provided')
elif (C is not None) & (ST is not None) & ((c_fix is None) |
(st_fix is None)):
err_str1 = 'Only C or ST estimate must be provided, '
raise TypeError(
err_str1 + 'unless c_fix and st_fix are both provided')
else:
self.C_ = _np.asanyarray(C) if C is not None else C
self.ST_ = _np.asanyarray(ST) if ST is not None else ST
self.n_increase = 0
self.n_above_min = 0
self.err = []
# Both C and ST provided. special_skip_c comes into play below
both_condition = (self.ST_ is not None) & (self.C_ is not None)
for num in range(self.max_iter):
self.n_iter = num + 1
# Both st and c provided, but c_first is False
if both_condition & (num == 0) & (not c_first):
special_skip_c = True
else:
special_skip_c = False
if (self.ST_ is not None) & (not special_skip_c):
# Debugging feature -- saves every S^T matrix in a list
# Can create huge memory usage
if self._saveall_st:
self._saved_st.append(self.ST_)
# * Target is the feature of the regression
self.c_regressor.fit(self.ST_.T, D.T, **self.c_fit_kwargs)
C_temp = self.c_regressor.coef_
# Apply fixed C's
if c_fix:
C_temp[:, c_fix] = self.C_[:, c_fix]
# Apply c-constraints
for constr in self.c_constraints:
C_temp = constr.transform(C_temp)
# Apply fixed C's
if c_fix:
C_temp[:, c_fix] = self.C_[:, c_fix]
D_calc = _np.dot(C_temp, self.ST_)
err_temp = self.err_fcn(C_temp, self.ST_, D, D_calc)
if self._ismin_err(err_temp):
self.C_opt_ = 1 * C_temp
self.ST_opt_ = 1 * self.ST_
self.n_iter_opt = num + 1
self.n_above_min = 0
else:
self.n_above_min += 1
if self.tol_n_above_min is not None:
if self.n_above_min > self.tol_n_above_min:
err_str1 = 'Half-iterated {} times since ' \
'min '.format(self.n_above_min)
err_str2 = 'error. Exiting.'
_logger.info(err_str1 + err_str2)
self.exit_tol_n_above_min = True
break
# Calculate error fcn and check for tolerance increase
if len(self.err) == 0:
self.err.append(1 * err_temp)
self.C_ = 1 * C_temp
elif self.tol_increase is None:
self.err.append(1 * err_temp)
self.C_ = 1 * C_temp
elif err_temp <= self.err[-1] * (1 + self.tol_increase):
self.err.append(1 * err_temp)
self.C_ = 1 * C_temp
else:
                    err_str1 = 'Error increased above fractional ' \
                        'tol_increase (C iter). Exiting'
_logger.info(err_str1)
self.exit_tol_increase = True
break
# Check if err went up
if len(self.err) > 1:
if self.err[-1] > self.err[-2]: # Error increased
self.n_increase += 1
else:
self.n_increase *= 0
# Break if too many error-increases in a row
if self.tol_n_increase is not None:
if self.n_increase > self.tol_n_increase:
out_str1 = 'Maximum error increases reached '
_logger.info(
out_str1 + '({}) (C iter). '
'Exiting.'.format(self.tol_n_increase))
self.exit_tol_n_increase = True
break
_logger.debug('Iter: {} (C)\t{}: '
'{:.4e}'.format(self.n_iter,
self.err_fcn.__name__,
err_temp))
if post_half_fcn is not None:
post_half_fcn(self.C_, self.ST_, D, D_calc)
if self.C_ is not None:
# Debugging feature -- saves every C matrix in a list
# Can create huge memory usage
if self._saveall_c:
self._saved_c.append(self.C_)
# * Target is the feature of the regression
self.st_regressor.fit(self.C_, D, **self.st_fit_kwargs)
ST_temp = self.st_regressor.coef_.T
# Apply fixed ST's
if st_fix:
ST_temp[st_fix] = self.ST_[st_fix]
# Apply ST-constraints
for constr in self.st_constraints:
ST_temp = constr.transform(ST_temp)
# Apply fixed ST's
if st_fix:
ST_temp[st_fix] = self.ST_[st_fix]
D_calc = _np.dot(self.C_, ST_temp)
err_temp = self.err_fcn(self.C_, ST_temp, D, D_calc)
# Calculate error fcn and check for tolerance increase
if self._ismin_err(err_temp):
self.ST_opt_ = 1 * ST_temp
self.C_opt_ = 1 * self.C_
self.n_iter_opt = num + 1
self.n_above_min = 0
else:
self.n_above_min += 1
if self.tol_n_above_min is not None:
if self.n_above_min > self.tol_n_above_min:
err_str1 = 'Half-iterated {} times ' \
'since min '.format(self.n_above_min)
err_str2 = 'error. Exiting.'
_logger.info(err_str1 + err_str2)
self.exit_tol_n_above_min = True
break
if len(self.err) == 0:
self.err.append(1 * err_temp)
self.ST_ = 1 * ST_temp
elif self.tol_increase is None:
self.err.append(1 * err_temp)
self.ST_ = 1 * ST_temp
elif err_temp <= self.err[-1] * (1 + self.tol_increase):
self.err.append(1 * err_temp)
self.ST_ = 1 * ST_temp
else:
err_str1 = 'Error increased above fractional ' \
'tol_increase (ST iter). Exiting'
_logger.info(err_str1)
self.exit_tol_increase = True
break
# Check if err went up
if len(self.err) > 1:
if self.err[-1] > self.err[-2]: # Error increased
self.n_increase += 1
else:
self.n_increase *= 0
# Break if too many error-increases in a row
if self.tol_n_increase is not None:
if self.n_increase > self.tol_n_increase:
out_str = 'Maximum error increases reached '
_logger.info(out_str +
'({}) (ST iter). '
'Exiting.'.format(self.tol_n_increase))
self.exit_tol_n_increase = True
break
_logger.debug('Iter: {} (ST)\t{}: '
'{:.4e}'.format(self.n_iter,
self.err_fcn.__name__, err_temp))
if post_half_fcn is not None:
post_half_fcn(self.C_, self.ST_, D, D_calc)
if post_iter_fcn is not None:
post_iter_fcn(self.C_, self.ST_, D, D_calc)
if self.n_iter >= self.max_iter:
_logger.info('Max iterations reached ({}).'.format(num + 1))
self.exit_max_iter_reached = True
break
self.n_iter = num + 1
# Check if err changed (absolute value), per iteration, less
# than abs(tol_err_change)
if (self.tol_err_change is not None) & (len(self.err) > 2):
err_differ = _np.abs(self.err[-1] - self.err[-3])
if err_differ < _np.abs(self.tol_err_change):
_logger.info('Change in err below tol_err_change '
'({:.4e}). Exiting.'.format(err_differ))
self.exit_tol_err_change = True
break
def fit_transform(self, D, **kwargs):
"""
        This serves the same purpose as the fit method, but returns the C_ matrix.
        Really, it is just here so that sklearn-expectant APIs are compatible with pyMCR.
        It is recommended to use the fit method and retrieve your results from C_ and ST_.
See documentation for the fit method
Returns
--------
C_ : ndarray
C-matrix is returned
"""
self.fit(D, **kwargs)
return self.C_
@property
def components_(self):
""" This is just provided for sklearn-like functionality """
return self.ST_
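# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module). It
# assumes the alternating-regression estimator defined above (e.g. pyMCR's
# McrAR) and shows the typical workflow: build a data matrix D = C S^T,
# provide an initial spectral estimate ST, call fit(), and read the optimal
# factors from C_opt_ / ST_opt_. The estimator class is passed in as an
# argument so that no particular class name is assumed here.
def _example_alternating_regression(mcr_cls, n_samples=50, n_features=80,
                                    n_targets=2, seed=0):
    """ Run a small synthetic MCR-AR fit and return the fitted estimator """
    rng = _np.random.RandomState(seed)
    C_true = _np.abs(rng.rand(n_samples, n_targets))     # nonnegative concentrations
    ST_true = _np.abs(rng.rand(n_targets, n_features))   # nonnegative spectra
    D = _np.dot(C_true, ST_true)                         # noiseless D = C S^T
    # Built-in regressors can be requested by name ('OLS' or 'NNLS')
    est = mcr_cls(c_regr='NNLS', st_regr='NNLS', max_iter=100)
    # Provide only the initial S^T estimate (here, a perturbed ground truth)
    est.fit(D, ST=ST_true + 0.1 * rng.rand(n_targets, n_features))
    return est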
if __name__ == '__main__': # pragma: no cover
# PyMCR uses the Logging facility to capture messaging
# Sends logging messages to stdout (prints them)
stdout_handler = _logging.StreamHandler(stream=_sys.stdout)
stdout_format = _logging.Formatter('%(message)s')
stdout_handler.setFormatter(stdout_format)
_logger.addHandler(stdout_handler)
M = 21
N = 21
P = 101
n_components = 2
C_img = _np.zeros((M, N, n_components))
C_img[..., 0] = _np.dot(_np.ones((M, 1)), _np.linspace(0, 1, N)[None, :])
C_img[..., 1] = 1 - C_img[..., 0]
    St_known = _np.zeros((n_components, P))
# -*- coding: utf-8 -*-
#
# layers.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Run as python layers.py > layers.log
import matplotlib.pyplot as plt
import nest
import numpy as np
# seed NumPy RNG to ensure identical results for runs with random placement
np.random.seed(1234567)
def beautify_layer(l, fig=plt.gcf(), xlabel=None, ylabel=None,
xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
"""Assume either x and ylims/ticks given or none"""
top = nest.GetStatus(l)[0]['topology']
ctr = top['center']
ext = top['extent']
if xticks is None:
if 'rows' in top:
dx = float(ext[0]) / top['columns']
dy = float(ext[1]) / top['rows']
xticks = ctr[0] - ext[0] / 2. + dx / 2. + dx * np.arange(
top['columns'])
yticks = ctr[1] - ext[1] / 2. + dy / 2. + dy * np.arange(
top['rows'])
if xlim is None:
xlim = [ctr[0] - ext[0] / 2. - dx / 2., ctr[0] + ext[
0] / 2. + dx / 2.] # extra space so extent is visible
ylim = [ctr[1] - ext[1] / 2. - dy / 2., ctr[1] + ext[1] / 2. + dy / 2.]
else:
ext = [xlim[1] - xlim[0], ylim[1] - ylim[0]]
ax = fig.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_aspect('equal', 'box')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.grid(True)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return
# --------------------------------------------------
nest.ResetKernel()
#{ layer1 #}
import nest.topology as tp
l = tp.CreateLayer({'rows': 5,
'columns': 5,
'elements': 'iaf_psc_alpha'})
#{ end #}
fig = tp.PlotLayer(l, nodesize=50)
beautify_layer(l, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)')
ax = fig.gca()
tx = []
for r in range(5):
tx.append(ax.text(0.65, 0.4 - r * 0.2, str(r),
horizontalalignment='center',
verticalalignment='center'))
tx.append(ax.text(-0.4 + r * 0.2, 0.65, str(r),
horizontalalignment='center',
verticalalignment='center'))
# For bbox_extra_artists, see
# https://github.com/matplotlib/matplotlib/issues/351
plt.savefig('../user_manual_figures/layer1.png', bbox_inches='tight',
bbox_extra_artists=tx)
print("#{ layer1s.log #}")
#{ layer1s #}
print(nest.GetStatus(l)[0]['topology'])
#{ end #}
print("#{ end.log #}")
print("#{ layer1p.log #}")
#{ layer1p #}
nest.PrintNetwork(depth=3)
#{ end #}
print("#{ end.log #}")
# --------------------------------------------------
nest.ResetKernel()
#{ layer2 #}
l = tp.CreateLayer({'rows': 5,
'columns': 5,
'extent': [2.0, 0.5],
'elements': 'iaf_psc_alpha'})
#{ end #}
fig = tp.PlotLayer(l, nodesize=50)
beautify_layer(l, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)')
ax = fig.gca()
tx = []
for r in range(5):
tx.append(fig.gca().text(1.25, 0.2 - r * 0.1, str(r),
horizontalalignment='center',
verticalalignment='center'))
tx.append(fig.gca().text(-0.8 + r * 0.4, 0.35, str(r),
horizontalalignment='center',
verticalalignment='center'))
# See https://github.com/matplotlib/matplotlib/issues/351
plt.savefig('../user_manual_figures/layer2.png', bbox_inches='tight',
bbox_extra_artists=tx)
# --------------------------------------------------
nest.ResetKernel()
#{ layer3 #}
l1 = tp.CreateLayer({'rows': 5, 'columns': 5, 'elements': 'iaf_psc_alpha'})
l2 = tp.CreateLayer({'rows': 5, 'columns': 5, 'elements': 'iaf_psc_alpha',
'center': [-1., 1.]})
l3 = tp.CreateLayer({'rows': 5, 'columns': 5, 'elements': 'iaf_psc_alpha',
'center': [1.5, 0.5]})
#{ end #}
fig = tp.PlotLayer(l1, nodesize=50)
tp.PlotLayer(l2, nodesize=50, nodecolor='g', fig=fig)
tp.PlotLayer(l3, nodesize=50, nodecolor='r', fig=fig)
beautify_layer(l1, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xlim=[-1.6, 2.1], ylim=[-0.6, 1.6],
xticks=np.arange(-1.4, 2.05, 0.2),
yticks=np.arange(-0.4, 1.45, 0.2))
plt.savefig('../user_manual_figures/layer3.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer3a #}
nc, nr = 5, 3
d = 0.1
l = tp.CreateLayer({'columns': nc, 'rows': nr, 'elements': 'iaf_psc_alpha',
'extent': [nc * d, nr * d], 'center': [nc * d / 2., 0.]})
#{ end #}
fig = tp.PlotLayer(l, nodesize=100)
plt.plot(0, 0, 'x', markersize=20, c='k', mew=3)
plt.plot(nc * d / 2, 0, 'o', markersize=20, c='k', mew=3, mfc='none',
zorder=100)
beautify_layer(l, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xticks=np.arange(0., 0.501, 0.05),
yticks=np.arange(-0.15, 0.151, 0.05),
xlim=[-0.05, 0.55], ylim=[-0.2, 0.2])
plt.savefig('../user_manual_figures/layer3a.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer4 #}
import numpy as np
pos = [[np.random.uniform(-0.5, 0.5), np.random.uniform(-0.5, 0.5)]
for j in range(50)]
l = tp.CreateLayer({'positions': pos,
'elements': 'iaf_psc_alpha'})
#{ end #}
fig = tp.PlotLayer(l, nodesize=50)
beautify_layer(l, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xlim=[-0.55, 0.55], ylim=[-0.55, 0.55],
xticks=[-0.5, 0., 0.5], yticks=[-0.5, 0., 0.5])
plt.savefig('../user_manual_figures/layer4.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer4_3d #}
import numpy as np
pos = [[np.random.uniform(-0.5, 0.5), np.random.uniform(-0.5, 0.5),
np.random.uniform(-0.5, 0.5)] for j in range(200)]
l = tp.CreateLayer({'positions': pos,
'elements': 'iaf_psc_alpha'})
#{ end #}
fig = tp.PlotLayer(l, nodesize=50)
plt.savefig('../user_manual_figures/layer4_3d.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ player #}
lp = tp.CreateLayer({'rows': 1, 'columns': 5, 'extent': [5., 1.],
'elements': 'iaf_psc_alpha',
'edge_wrap': True})
#{ end #}
# fake plot with layer on line and circle
clist = [(0, 0, 1), (0.35, 0, 1), (0.6, 0, 1), (0.8, 0, 1), (1.0, 0, 1)]
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.plot([0.5, 5.5], [0, 0], 'k-', lw=2)
ax1.scatter(range(1, 6), [0] * 5, s=200, c=clist)
ax1.set_xlim([0, 6])
ax1.set_ylim([-0.5, 1.25])
ax1.set_aspect('equal', 'box')
ax1.set_xticks([])
ax1.set_yticks([])
for j in range(1, 6):
ax1.text(j, 0.5, str('(%d,0)' % (j - 3)),
horizontalalignment='center', verticalalignment='bottom')
ax1a = fig.add_subplot(223)
ax1a.plot([0.5, 5.5], [0, 0], 'k-', lw=2)
ax1a.scatter(range(1, 6), [0] * 5, s=200,
c=[clist[0], clist[1], clist[2], clist[2], clist[1]])
ax1a.set_xlim([0, 6])
ax1a.set_ylim([-0.5, 1.25])
ax1a.set_aspect('equal', 'box')
ax1a.set_xticks([])
ax1a.set_yticks([])
for j in range(1, 6):
ax1a.text(j, 0.5, str('(%d,0)' % (j - 3)),
horizontalalignment='center', verticalalignment='bottom')
ax2 = fig.add_subplot(122)
phic = np.arange(0., 2 * np.pi + 0.5, 0.1)
r = 5. / (2 * np.pi)
ax2.plot(r * np.cos(phic), r * np.sin(phic), 'k-', lw=2)
import numpy as np
from tuning.cyMINoncyclic import mc_mean_grad_gaussian
import matplotlib.pyplot as plt
#============Gaussian Noncyclic model with NO constraints============
# -----------Bandit Algorithm helper functions-----------
def gaussian_log_ratio(bin_index, output_value, input_set, input_prob_vec, inverse_cov_matrix):
# bin_index: i = 0,1,...,numBin-1
# output_value: response r (vector with length=numNeuro)
# input_set: {\lambda_{k,j}} (vector shape=(numNeuro,numBin)) (same as tuning_curve) (all nonzero)
# input_prob_vec: {w_j} (list or 1-d vector with length = numBin)
# inverse_cov_matrix: numNeuro-by-numNeuro numpy array.
if input_set.ndim == 1:
input_set = input_set.reshape((1,-1))
numNeuro, numBin = input_set.shape
sum_exp = 0
for l in range(numBin):
vec_l = output_value - input_set[:,l]
vec_bin_index = output_value - input_set[:, bin_index]
quad_l = np.dot(vec_l, np.dot(inverse_cov_matrix, vec_l))
quad_bin_index = np.dot(vec_bin_index, np.dot(inverse_cov_matrix, vec_bin_index))
sum_exp += input_prob_vec[l]*np.exp(-0.5*(quad_l - quad_bin_index))
return -np.log(sum_exp)
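# Minimal sketch (illustrative, not part of the original module): evaluate the
# log ratio above for a toy setup with numNeuro=2 neurons and numBin=3 bins.
# The tuning curve, weights and covariance below are made-up placeholder values.
def _example_gaussian_log_ratio():
    input_set = np.array([[1.0, 2.0, 3.0],
                          [0.5, 1.5, 2.5]])        # tuning curve, shape (numNeuro, numBin)
    input_prob_vec = np.array([0.2, 0.5, 0.3])     # weights w_j, summing to 1
    inverse_cov_matrix = np.linalg.inv(np.array([[1.0, 0.2],
                                                 [0.2, 1.0]]))
    r = np.array([1.1, 0.6])                       # one observed response vector
    return gaussian_log_ratio(0, r, input_set, input_prob_vec, inverse_cov_matrix)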
# -----------Bandit Algorithm for Arimoto iterations-----------
def gaussian_bandit_iteration(input_set, inverse_cov_matrix,
initial_prob_vec = None, max_iter = 1000, batch_size = 1,
dkl_discount_factor = "decrease", epsilon=0,
update_rule = "additive",#"direct", "multiply"
initial_learning_rate = 0.01, learning_rate_decrease_rate = 0):
# 'temporal difference scheme'
# dkl_discount_factor = "decrease" or [0,1) e.g. 0.9
# dkl_discount_factor = 0 is the same as using argmax(rewards) ignoring path history
# epsilon probability on selecting other choices
if not (dkl_discount_factor == "decrease" or (dkl_discount_factor >=0 and dkl_discount_factor <=1)):
raise Exception("dkl_discount_factor must be in [0,1] or 'decrease'!")
if not (update_rule in ["additive", "direct", "multiply"]):
raise Exception("update_rule must be in ['additive', 'direct','multiply']!")
if input_set.ndim == 1:
input_set = input_set.reshape((1,-1))
numNeuro, numBin = input_set.shape
#num_inputs = numBin
if initial_prob_vec is None:
        prob_vec = 1.0*np.ones(numBin)/numBin # initialize with equal weights
else:
prob_vec = initial_prob_vec.copy()
rewards = np.zeros(numBin)
DKL_estimates = np.zeros(numBin)
num_iter = 0
rewards_list = []
DKL_estimates_list = []
prob_vec_list = [prob_vec.copy()]
# generate gaussian samples in advance
    cov_matrix = np.linalg.inv(inverse_cov_matrix)
import numpy as np
from base_test import ArkoudaTest
from context import arkouda as ak
from arkouda.dtypes import npstr
"""
Encapsulates unit tests for the numeric module with the exception
of the where method, which is in the where_test module
"""
class NumericTest(ArkoudaTest):
def testSeededRNG(self):
N = 100
seed = 8675309
numericdtypes = [ak.int64, ak.float64, ak.bool, ak.uint64]
for dt in numericdtypes:
# Make sure unseeded runs differ
a = ak.randint(0, 2**32, N, dtype=dt)
b = ak.randint(0, 2**32, N, dtype=dt)
self.assertFalse((a == b).all())
# Make sure seeded results are same
a = ak.randint(0, 2**32, N, dtype=dt, seed=seed)
b = ak.randint(0, 2**32, N, dtype=dt, seed=seed)
self.assertTrue((a == b).all())
# Uniform
self.assertFalse((ak.uniform(N) == ak.uniform(N)).all())
self.assertTrue((ak.uniform(N, seed=seed) == ak.uniform(N, seed=seed)).all())
# Standard Normal
self.assertFalse((ak.standard_normal(N) == ak.standard_normal(N)).all())
self.assertTrue((ak.standard_normal(N, seed=seed) == ak.standard_normal(N, seed=seed)).all())
# Strings (uniformly distributed length)
self.assertFalse(
(ak.random_strings_uniform(1, 10, N) == ak.random_strings_uniform(1, 10, N)).all()
)
self.assertTrue(
(
ak.random_strings_uniform(1, 10, N, seed=seed)
== ak.random_strings_uniform(1, 10, N, seed=seed)
).all()
)
# Strings (log-normally distributed length)
self.assertFalse(
(ak.random_strings_lognormal(2, 1, N) == ak.random_strings_lognormal(2, 1, N)).all()
)
self.assertTrue(
(
ak.random_strings_lognormal(2, 1, N, seed=seed)
== ak.random_strings_lognormal(2, 1, N, seed=seed)
).all()
)
def testCast(self):
N = 100
arrays = {
ak.int64: ak.randint(-(2**48), 2**48, N),
ak.float64: ak.randint(0, 1, N, dtype=ak.float64),
ak.bool: ak.randint(0, 2, N, dtype=ak.bool),
}
roundtripable = set(
(
(ak.bool, ak.bool),
(ak.int64, ak.int64),
(ak.int64, ak.float64),
(ak.int64, npstr),
(ak.float64, ak.float64),
(ak.float64, npstr),
(ak.uint8, ak.int64),
(ak.uint8, ak.float64),
(ak.uint8, npstr),
)
)
for t1, orig in arrays.items():
for t2 in ak.DTypes:
t2 = ak.dtype(t2)
other = ak.cast(orig, t2)
self.assertEqual(orig.size, other.size)
if (t1, t2) in roundtripable:
roundtrip = ak.cast(other, t1)
self.assertTrue(
(orig == roundtrip).all(), f"{t1}: {orig[:5]}, {t2}: {roundtrip[:5]}"
)
self.assertTrue((ak.array([1, 2, 3, 4, 5]) == ak.cast(ak.linspace(1, 5, 5), dt=ak.int64)).all())
self.assertEqual(ak.cast(ak.arange(0, 5), dt=ak.float64).dtype, ak.float64)
self.assertTrue(
(
ak.array([False, True, True, True, True]) == ak.cast(ak.linspace(0, 4, 5), dt=ak.bool)
).all()
)
def testStrCastErrors(self):
intNAN = -(2**63)
intstr = ak.array(["1", "2 ", "3?", "!4", " 5", "-45", "0b101", "0x30", "N/A"])
intans = np.array([1, 2, intNAN, intNAN, 5, -45, 0b101, 0x30, intNAN])
uintNAN = 0
uintstr = ak.array(["1", "2 ", "3?", "-4", " 5", "45", "0b101", "0x30", "N/A"])
uintans = np.array([1, 2, uintNAN, uintNAN, 5, 45, 0b101, 0x30, uintNAN])
floatstr = ak.array(["1.1", "2.2 ", "3?.3", "4.!4", " 5.5", "6.6e-6", "78.91E+4", "6", "N/A"])
floatans = np.array([1.1, 2.2, np.nan, np.nan, 5.5, 6.6e-6, 78.91e4, 6.0, np.nan])
boolstr = ak.array(
["True", "False ", "Neither", "N/A", " True", "true", "false", "TRUE", "NOTTRUE"]
)
boolans = np.array([True, False, False, False, True, True, False, True, False])
validans = ak.array([True, True, False, False, True, True, True, True, False])
for dt, arg, ans in [
(ak.int64, intstr, intans),
(ak.uint64, uintstr, uintans),
(ak.float64, floatstr, floatans),
(ak.bool, boolstr, boolans),
]:
with self.assertRaises(RuntimeError):
ak.cast(arg, dt, errors=ak.ErrorMode.strict)
res = ak.cast(arg, dt, errors=ak.ErrorMode.ignore)
self.assertTrue(np.allclose(ans, res.to_ndarray(), equal_nan=True))
res, valid = ak.cast(arg, dt, errors=ak.ErrorMode.return_validity)
self.assertTrue((valid == validans).all())
self.assertTrue(np.allclose(ans, res.to_ndarray(), equal_nan=True))
def testHistogram(self):
pda = ak.randint(10, 30, 40)
bins, result = ak.histogram(pda, bins=20)
self.assertIsInstance(result, ak.pdarray)
self.assertEqual(20, len(bins))
self.assertEqual(20, len(result))
self.assertEqual(int, result.dtype)
with self.assertRaises(TypeError):
ak.histogram([range(0,10)], bins=1)
with self.assertRaises(TypeError):
ak.histogram(pda, bins='1')
with self.assertRaises(TypeError):
ak.histogram([range(0,10)], bins='1')
def testLog(self):
na = np.linspace(1, 10, 10)
pda = ak.array(na)
self.assertTrue((np.log(na) == ak.log(pda).to_ndarray()).all())
with self.assertRaises(TypeError):
ak.log([range(0,10)])
def testExp(self):
na = np.linspace(1, 10, 10)
pda = ak.array(na)
self.assertTrue((np.exp(na) == ak.exp(pda).to_ndarray()).all())
with self.assertRaises(TypeError):
ak.exp([range(0,10)])
def testAbs(self):
na = np.linspace(1, 10, 10)
pda = ak.array(na)
self.assertTrue((np.abs(na) == ak.abs(pda).to_ndarray()).all())
self.assertTrue((ak.arange(5, 1, -1) == ak.abs(ak.arange(-5, -1))).all())
self.assertTrue((ak.array([5, 4, 3, 2, 1]) == ak.abs(ak.linspace(-5, -1, 5))).all())
with self.assertRaises(TypeError):
ak.abs([range(0, 10)])
def testCumSum(self):
na = np.linspace(1, 10, 10)
pda = ak.array(na)
self.assertTrue((np.cumsum(na) == ak.cumsum(pda).to_ndarray()).all())
# Test uint case
        na = np.linspace(1, 10, 10, dtype="uint64")
pda = ak.cast(pda, ak.uint64)
self.assertTrue((np.cumsum(na) == ak.cumsum(pda).to_ndarray()).all())
with self.assertRaises(TypeError):
ak.cumsum([range(0, 10)])
def testCumProd(self):
na = np.linspace(1, 10, 10)
pda = ak.array(na)
self.assertTrue((np.cumprod(na) == ak.cumprod(pda).to_ndarray()).all())
with self.assertRaises(TypeError):
ak.cumprod([range(0, 10)])
def testSin(self):
na = np.linspace(1, 10, 10)
pda = ak.array(na)
self.assertTrue((np.sin(na) == ak.sin(pda).to_ndarray()).all())
with self.assertRaises(TypeError):
            ak.sin([range(0, 10)])
def testCos(self):
na = np.linspace(1, 10, 10)
pda = ak.array(na)
self.assertTrue((np.cos(na) == ak.cos(pda).to_ndarray()).all())
with self.assertRaises(TypeError):
ak.cos([range(0, 10)])
def testHash(self):
h1, h2 = ak.hash(ak.arange(10))
rev = ak.arange(9, -1, -1)
h3, h4 = ak.hash(rev)
self.assertTrue((h1 == h3[rev]).all() and (h2 == h4[rev]).all())
h1 = ak.hash(ak.arange(10), full=False)
h3 = ak.hash(rev, full=False)
self.assertTrue((h1 == h3[rev]).all())
h = ak.hash(ak.linspace(0, 10, 10))
self.assertTrue((h[0].dtype == ak.uint64) and (h[1].dtype == ak.uint64))
def testValueCounts(self):
pda = ak.ones(100, dtype=ak.int64)
result = ak.value_counts(pda)
self.assertEqual(ak.array([1]), result[0])
self.assertEqual(ak.array([100]), result[1])
pda = ak.linspace(1, 10, 10)
with self.assertRaises(TypeError):
ak.value_counts(pda)
with self.assertRaises(TypeError):
ak.value_counts([0])
def test_isnan(self):
"""
Test efunc `isnan`; it returns a pdarray of element-wise T/F values for whether it is NaN
(not a number)
Currently we only support float based arrays since numpy doesn't support NaN in int-based arrays
"""
npa = np.array([1, 2, None, 3, 4], dtype="float64")
ark_s_float64 = ak.array(npa)
ark_isna_float64 = ak.isnan(ark_s_float64)
actual = ark_isna_float64.to_ndarray()
expected = np.isnan(npa)
self.assertTrue(np.array_equal(expected, actual))
# Currently we can't make an int64 array with a NaN in it so verify that we throw an Exception
        ark_s_int64 = ak.array(np.array([1, 2, 3, 4], dtype="int64"))
### Use with environment unet2DE
### script will match a corrected timelapse image to the A594-channel HCR image.
import numpy as np
from bin.fatetrack_register_v3 import *
from urllib.parse import urlparse
import cellpose
from cellpose import utils, io,models
import matplotlib
import matplotlib.pyplot as plt
import time, os, sys
import pandas as pd
import glob
### Part 1 : Image Registration
from skimage import img_as_uint,io,registration,transform,filters,restoration,util,feature,morphology,exposure,measure
from sklearn.cluster import KMeans
from scipy import ndimage
from skimage.util import montage
from scipy.spatial.distance import cdist
patch_kw = dict(patch_size=5,patch_distance=6, multichannel=False)
def getSubImage(image_path, image_name, channel='DAPI', dimx_0=0,dimx_1=1,dimy_0=0,dimy_1=1):
hcr_best_channel = io.imread(image_path+image_name+channel+'.tif')[dimx_0:dimx_1,dimy_0:dimy_1]
return(hcr_best_channel)
def TL_maxIntensity(nuclearImage, frame=-1):
time_image = io.imread(nuclearImage)
time_image = time_image[frame]
time_image = time_image / np.max(time_image)
tmp_time_image = morphology.white_tophat(time_image, morphology.disk(12))
tmp_time_image = tmp_time_image/np.max(tmp_time_image)
tmp_time_image = exposure.equalize_adapthist(tmp_time_image)
sigma_est = np.mean(restoration.estimate_sigma(tmp_time_image.astype(float), multichannel=False))
tmp_time_image = restoration.denoise_nl_means(tmp_time_image.astype(float), h=2*sigma_est, fast_mode=True,**patch_kw)
time_filter = KMeans(n_clusters = 2, random_state = 0).fit_predict(tmp_time_image.reshape(np.prod(tmp_time_image.shape), 1)).reshape(tmp_time_image.shape)
image0 = tmp_time_image*(time_filter == 0)
image1 = tmp_time_image*(time_filter == 1)
i0 = np.average(image0[image0 > 0])
    i1 = np.average(image1[image1 > 0])
from sys import argv
from trainer.configs import TrainingConfigs
from trainer.train_model import train_model
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
import numpy as np
#import os
#from common import get_hyperparameters
def create_training_cfg() -> TrainingConfigs:
# 20NG - MR - Ohsumed - R8, R52
conf = TrainingConfigs()
conf.data_sets = ['20ng', 'R8', 'R52', 'ohsumed',
'mr', 'cora', 'citeseer', 'pubmed', 'test']
#conf.data_sets = ['20ng', 'R8', 'R52', 'ohsumed', 'mr', 'test']
conf.corpus_split_index_dir = 'data/corpus.shuffled/split_index/'
conf.corpus_node_features_dir = 'data/corpus.shuffled/node_features/'
conf.corpus_adjacency_dir = '' # 'data/corpus.shuffled/adjacency/'
conf.corpus_vocab_dir = 'data/corpus.shuffled/vocabulary/'
conf.adjacency_sets = ['frequency', 'syntactic_dependency', 'linguistic_inquiry', 'graph']
conf.model = 'gcn'
conf.learning_rate = 0.02
conf.epochs = 200
conf.hidden1 = 200
conf.dropout = 0.5
conf.weight_decay = 0.
conf.early_stopping = 10
conf.chebyshev_max_degree = 3
conf.build()
return conf
def train(ds: str, training_cfg: TrainingConfigs):
# Start training
return train_model(ds_name=ds, is_featureless=True, cfg=training_cfg)
def save_history(hist, representation, dataset):
file_name = f'experiments/{representation}_dataset_{dataset}.txt'
with open(file_name, 'w') as my_file:
my_file.writelines(hist)
def tsne_visualizer(data_set, representation):
# data_set = 'mr' # 20ng R8 R52 ohsumed mr
data_path = './data/corpus.shuffled'
with open(f'{data_path}/split_index/{data_set}.train', 'r') as f:
lines = f.readlines()
train_size = len(lines)
with open(f'{data_path}/meta/{data_set}.meta', 'r') as f:
lines = f.readlines()
target_names = set()
labels = []
for line in lines:
line = line.strip()
temp = line.split('\t')
labels.append(temp[2])
target_names.add(temp[2])
target_names = list(target_names)
with open(f'./data/{data_set}_doc_vectors.txt', 'r') as f:
lines = f.readlines()
docs = []
for line in lines:
temp = line.strip().split()
values_str_list = temp[1:]
values = [float(x) for x in values_str_list]
docs.append(values)
fea = docs[train_size:] # int(train_size * 0.9)
label = labels[train_size:] # int(train_size * 0.9)
    label = np.array(label)
import pytest
import os
import glob
import json
from numpy import arange, allclose
from bolt import array as barray
from thunder.images.readers import fromlist, fromarray, frompng, fromtif, frombinary, fromexample
pytestmark = pytest.mark.usefixtures("eng")
resources = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'resources')
def test_from_list(eng):
a = arange(8).reshape((2, 4))
data = fromlist([a], engine=eng)
assert allclose(data.shape, (1,) + a.shape)
assert allclose(data.value_shape, a.shape)
assert allclose(data.toarray(), a)
def test_from_array(eng):
a = arange(8).reshape((1, 2, 4))
data = fromarray(a, engine=eng)
assert allclose(data.shape, a.shape)
assert allclose(data.value_shape, a.shape[1:])
assert allclose(data.toarray(), a)
def test_from_array_bolt(eng):
    a = arange(8)
from __future__ import division
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import yaml
import scipy as sp
import os
abs_dir = os.path.dirname(os.path.abspath(__file__))
# Conversion constants
k_B = 1.38065e-23 # [J/K]
N_A = 6.02214e23 # [1/mol]
m3_to_nm3 = 1e27
m2_to_nm2 = 1e18
gm_to_kg = 1. / 1000
J_to_kJ = 1. / 1000
J_per_m3_to_kPA = 1. / 1000
D_to_sqrtJm3 = 3.1623e-25
class LennardJones_2C():
def __init__(self, M_w):
self.M_w = M_w
with open(abs_dir + "/data/DCLJQ_fluid.yaml") as yfile:
            yfile = yaml.load(yfile, Loader=yaml.FullLoader)
self.T_c_star_params = np.array(yfile["correlation_parameters"]
["Stoll"]["T_c_star_params"])
self.rho_c_star_params = np.array(yfile["correlation_parameters"]
["Stoll"]["rho_c_star_params"])
self.b_C1 = np.array(yfile["correlation_parameters"]
["Stoll"]["rho_L_star_params"]["C1_params"])
self.b_C2_L = np.array(yfile["correlation_parameters"]
["Stoll"]["rho_L_star_params"]["C2_params"])
self.b_C3_L = np.array(yfile["correlation_parameters"]
["Stoll"]["rho_L_star_params"]["C3_params"])
self.b_C2_v = np.array(yfile["correlation_parameters"]
["Stoll"]["rho_v_star_params"]["C2_params"])
self.b_C3_v = np.array(yfile["correlation_parameters"]
["Stoll"]["rho_v_star_params"]["C3_params"])
self.b_c1 = np.array(yfile["correlation_parameters"]["Stoll"]
["P_v_star_params"]["c1_params"])
self.b_c2 = np.array(yfile["correlation_parameters"]
["Stoll"]["P_v_star_params"]["c2_params"])
self.b_c3 = np.array(yfile["correlation_parameters"]
["Stoll"]["P_v_star_params"]["c3_params"])
self.A_a = np.array(yfile["correlation_parameters"]
["Werth"]["A_star_params"]["a_params"])
self.A_b = np.array(yfile["correlation_parameters"]
["Werth"]["A_star_params"]["b_params"])
self.A_c = np.array(yfile["correlation_parameters"]
["Werth"]["A_star_params"]["c_params"])
self.A_d = np.array(yfile["correlation_parameters"]
["Werth"]["A_star_params"]["d_params"])
self.A_e = np.array(yfile["correlation_parameters"]
["Werth"]["A_star_params"]["e_params"])
self.B = np.array(yfile["correlation_parameters"]
["Werth"]["A_star_params"]["B_params"])
def T_c_star_hat(self, q, l):
b = self.T_c_star_params
x = np.array([1, q**2, q**3, 1. / (0.1 + l**2), 1. / (0.1 + l**5),
q**2 / (0.1 + l**2), q**2 / (0.1 + l**5),
q**3 / (0.1 + l**2), q**3 / (0.1 + l**5)])
T_c_star = x * b
T_c_star = T_c_star.sum()
return T_c_star
def rho_c_star_hat(self, q, l):
b = self.rho_c_star_params
x = np.array([1, q**2, q**3, l**2 / (0.11 + l**2),
l**5 / (0.11 + l**5), l**2 * q**2 / (0.11 + l**2),
l**5 * q**2 / (0.11 + l**5), l**2 * q**3 / (0.11 + l**2),
l**5 * q**3 / (0.11 + l**5)])
rho_c_star = x * b
rho_c_star = rho_c_star.sum()
return rho_c_star
def C1_hat(self, q, l, b):
x_C1 = np.array([1, q**2, q**3, l**3 / (l + 0.4)**3,
l**4 / (l + 0.4)**5, q**2 * l**2 / (l + 0.4),
q**2 * l**3 / (l + 0.4)**7, q**3 * l**2 / (l + 0.4),
q**3 * l**3 / (l + 0.4)**7])
C1 = x_C1 * b
C1 = C1.sum()
return C1
def C2_hat(self, q, l, b):
x_C2 = np.array([1, q**2, q**3, l**2, l**3, q**2 * l**2,
q**2 * l**3, q**3 * l**2])
C2 = x_C2 * b
C2 = C2.sum()
return C2
def C3_hat(self, q, l, b):
x_C3 = np.array([1, q**2, q**3, l, l**4,
q**2 * l, q**2 * l**4, q**3 * l**4])
C3 = x_C3 * b
C3 = C3.sum()
return C3
def rho_star_hat_2CLJQ(self, T_star, q, l, phase):
b_C1, b_C2_L, b_C3_L, b_C2_v, b_C3_v = self.b_C1, self.b_C2_L, \
self.b_C3_L, self.b_C2_v, self.b_C3_v
T_c_star = self.T_c_star_hat(q, l)
rho_c_star = self.rho_c_star_hat(q, l)
tau = T_c_star - T_star # T_c_star - T_star
if all(tau > 0):
x = np.ones([len(tau), 4]) # First column all ones
x[:, 1] = tau**(1. / 3)
x[:, 2] = tau
x[:, 3] = tau**(3. / 2)
C1 = self.C1_hat(q, l, b_C1)
if phase == 'liquid':
C2 = self.C2_hat(q, l, b_C2_L)
C3 = self.C3_hat(q, l, b_C3_L)
b = np.array([rho_c_star, C1, C2, C3])
elif phase == 'vapor':
C2 = self.C2_hat(q, l, b_C2_v)
C3 = self.C3_hat(q, l, b_C3_v)
b = np.array([rho_c_star, -C1, C2, C3])
else:
return 0
# rho_star = b[0]+b[1]*tau**(1./3)+b[2]*tau+b[3]*tau**(3./2)
# The brute force approach
rho_star = x * b
# To add up the rows (that pertain to a specific T_star)
rho_star = rho_star.sum(axis=1)
else:
rho_star = np.zeros([len(tau)])
return rho_star
def rho_hat_2CLJQ(self, Temp, eps, sig, Lbond, Qpole, phase):
'''
inputs:
Temp: temperature [K]
eps: epsilon/kb [K]
sig: sigma [nm]
Lbond: bond-length [nm]
Qpole: quadrupole [Debye * nm]
phase: liquid or vapor
outputs:
rho: density [kg/m3]
'''
M_w = self.M_w
T_star = Temp / eps # note that eps is defined as eps/kB
Qpole = Qpole * D_to_sqrtJm3 # [(J*m3)^(1/2) nm]
Q2pole = Qpole**2 * m3_to_nm3 # [J*nm5]
Q2_star = Q2pole / (eps * k_B * sig**5)
# note that eps is defined as eps/kB
L_star = Lbond / sig
rho_star = self.rho_star_hat_2CLJQ(T_star, Q2_star, L_star, phase)
rho = rho_star * M_w / sig**3 / N_A * m3_to_nm3 * gm_to_kg # [kg/m3]
return rho
def rhol_hat_2CLJQ(self, Temp, eps, sig, Lbond, Qpole):
rhol = self.rho_hat_2CLJQ(Temp, eps, sig, Lbond, Qpole, 'liquid')
return rhol # [kg/m3]
def rhov_hat_2CLJQ(self, Temp, eps, sig, Lbond, Qpole):
rhov = self.rho_hat_2CLJQ(Temp, eps, sig, Lbond, Qpole, 'vapor')
return rhov # [kg/m3]
def Psat_star_hat_2CLJQ(self, T_star, q, l):
b_c1, b_c2, b_c3 = self.b_c1, self.b_c2, self.b_c3
x_c1 = [1.,
q**2,
q**3,
l**2 / (l**2 + 0.75),
l**3 / (l**3 + 0.75),
l**2 * q**2 / (l**2 + 0.75),
l**3 * q**2 / (l**3 + 0.75),
l**2 * q**3 / (l**2 + 0.75),
l**3 * q**3 / (l**3 + 0.75)]
x_c2 = [1.,
q**2,
q**3,
l**2 / (l + 0.75)**2,
l**3 / (l + 0.75)**3,
l**2 * q**2 / (l + 0.75)**2,
l**3 * q**2 / (l + 0.75)**3,
l**2 * q**3 / (l + 0.75)**2,
l**3 * q**3 / (l + 0.75)**3]
x_c3 = [q**2, q**5, l**0.5]
c1 = (x_c1 * b_c1).sum()
c2 = (x_c2 * b_c2).sum()
c3 = (x_c3 * b_c3).sum()
Psat_star = np.exp(c1 + c2 / T_star + c3 / (T_star**4))
return Psat_star
def Psat_hat_2CLJQ(self, Temp, eps, sig, Lbond, Qpole):
'''
inputs:
Temp: temperature [K]
eps: epsilon/kb [K]
sig: sigma [nm]
Lbond: bond-length [nm]
Qpole: quadrupole [Debye * nm]
outputs:
Psat: vapor pressure [kPa]
'''
T_star = Temp / eps # note that eps is defined as eps/kB
Qpole = Qpole * D_to_sqrtJm3 # [(J*m3)^(1/2) nm]
Q2pole = Qpole**2 * m3_to_nm3 # [J*nm5]
Q2_star = Q2pole / (eps * k_B * sig**5)
# note that eps is defined as eps/kB
L_star = Lbond / sig
Psat_star = self.Psat_star_hat_2CLJQ(T_star, Q2_star, L_star)
Psat = Psat_star * eps / sig**3 * k_B * m3_to_nm3 * \
J_per_m3_to_kPA # [kPa] #note that eps is defined as eps/kB
return Psat
def LJ_model(self, r, eps, sig):
r_star = r / sig
U = 4 * eps * (r_star**(-12) - r_star**(-6))
return U
def Astar_hat(self, q, l):
a, b, c, d, e = self.A_a, self.A_b, self.A_c, self.A_d, self.A_e
x_a = np.array([1])
x_b = np.array([q, q**2., q**3.])
x_c = np.array([1. / (l**2. + 0.1)])
x_d = np.array([q**2. * l**2., q**2. * l**3.])
        x_e = np.array([q**2 / (l**2. + 0.1), q**2. / (l**5. + 0.1)])
import numpy as np
import pdb, warnings, pickle
from numpy.polynomial.legendre import leggauss
from numpy.polynomial.hermite_e import hermegauss
import itertools
from scipy.special import comb
"""
This code has been heavily modified and adapted from UQTk (https://github.com/sandialabs/UQTk) and Chaospy (https://github.com/jonathf/chaospy), using custom multi-index generation and avoiding external polynomial libraries such as numpoly.
The modifications are substantial, chiefly to (1) allow quadrature generation in pure Python code and (2) remove the need for any third-party polynomial libraries other than numpy.
"""
class QuadBase:
def __init__(self, nquad):
self.nquad = nquad
def get1dQuad(self):
pass
class LegendreQuad(QuadBase):
def __init__(self, nquad=2):
super().__init__(nquad)
def get1dQuad(self, nquad=None):
if nquad is not None:
self.nquad = nquad
x, w = leggauss(self.nquad)
# rescale weights to sum to 1
w = w / 2.0
return x, w
class HermiteQuad(QuadBase):
"""normalized"""
def __init__(self, nquad=2):
super().__init__(nquad)
def get1dQuad(self, nquad=None):
if nquad is not None:
self.nquad = nquad
x, w = hermegauss(self.nquad)
return x, w # add a factor of (2*np.pi)**-.5 to normalize each dimension
class ClenshawCurtis(QuadBase):
def __init__(self, nquad=2):
super().__init__(nquad)
def _get1dQuad(self, nquad=None):
"""old"""
if nquad is not None:
self.nquad = nquad
if self.nquad == 1:
return np.array([0.0]), np.array([2.0])
else:
n = self.nquad
x = np.cos(np.pi * (n - 1 - np.arange(n)) / (n - 1))
w = np.ones(len(x))
for i in range(n):
theta = i * np.pi / (n - 1)
for j in range(1, int(0.5 * (n - 1) + 1)):
if 2 * j == n - 1:
f = 1.0
else:
f = 2.0
w[i] -= f * np.cos(2.0 * j * theta) / (4.0 * j ** 2 - 1)
w[0] /= n - 1
w[1:-1] = 2 * w[1:-1] / (n - 1)
w[-1] *= 1.0 / (n - 1)
return x, w
def get1dQuad(self, nquad=None):
"""from chaospy"""
if nquad is not None:
self.nquad = nquad
degree = self.nquad
n = self.nquad
if n == 1:
points = np.array([0.0])
weights = np.array([2.0])
else:
points = -np.cos((np.pi * np.arange(n)) / (n - 1))
if n == 2:
weights = np.array([1.0, 1.0])
else:
n -= 1
N = np.arange(1, n, 2)
length = len(N)
m = n - length
v0 = np.concatenate(
[2.0 / N / (N - 2), np.array([1.0 / N[-1]]), np.zeros(m)]
)
v2 = -v0[:-1] - v0[:0:-1]
g0 = -np.ones(n)
g0[length] += n
g0[m] += n
g = g0 / (n ** 2 - 1 + (n % 2))
w = np.fft.ihfft(v2 + g)
assert max(w.imag) < 1.0e-15
w = w.real
if n % 2 == 1:
weights = np.concatenate([w, w[::-1]])
else:
weights = np.concatenate([w, w[len(w) - 2 :: -1]])
weights = weights / 2.0
return points, weights
class QuadFactory:
# generates QuadBase class object
@staticmethod
def newQuad(quadtype="legendre_gauss"):
if quadtype == "legendre_gauss":
Q = LegendreQuad()
if quadtype == "clenshaw_curtis":
Q = ClenshawCurtis()
if quadtype == "hermite_gauss":
Q = HermiteQuad()
return Q
class QuadRule:
def __init__(self, x, w):
self.x = x
self.w = w
self.n = len(w)
if x.ndim == 1:
self.dim = 1
self.x = np.atleast_2d(x).T # col vector
else:
self.dim = x.shape[1]
assert len(x) == len(w), "x and w dont habe the same # of points"
def __add__(self, other):
assert self.dim == other.dim, "Dimensions do not match!"
xnew = np.vstack([self.x, other.x])
wnew = np.hstack([self.w, other.w])
Qnew = QuadRule(xnew, wnew)
return Qnew
def __sub__(self, other):
assert self.dim == other.dim, "Dimensions do not match!"
xnew = np.vstack([self.x, other.x])
wnew = np.hstack([self.w, -1 * other.w])
Qnew = QuadRule(xnew, wnew)
return Qnew
def __mul__(self, other):
# tensor product
index_comb = list(itertools.product(range(self.n), range(other.n)))
xnew = [np.concatenate([self.x[i[0]], other.x[i[1]]]) for i in index_comb]
wnew = [self.w[i[0]] * other.w[i[1]] for i in index_comb]
Qnew = QuadRule(np.array(xnew), np.array(wnew))
return Qnew
def copy(self):
return QuadRule(self.x, self.w)
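# Minimal sketch (illustrative, not from the original sources): build a full
# tensor-product rule in ndim dimensions by repeatedly applying
# QuadRule.__mul__ to a 1-d Gauss-Legendre rule. Because the 1-d weights are
# rescaled to sum to 1, the resulting ndim-dimensional weights also sum to 1.
def _example_tensor_product_rule(ndim=2, nquad=3):
    x, w = LegendreQuad(nquad).get1dQuad()
    q1d = QuadRule(x, w)
    q = q1d.copy()
    for _ in range(1, ndim):
        q = q * q1d   # tensor product: points are concatenated, weights multiplied
    return q          # q.x has shape (nquad**ndim, ndim) and q.w sums to ~1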
class QuadOps:
@staticmethod
def getMultiIndexLevel(level, ndim):
"""returns the multindices of order = level"""
iup = 0
nup_level = int(comb(ndim + level - 1, level))
M = np.zeros((nup_level, ndim))
if ndim == 1:
M[0, 0] = level
else:
for first in range(level, -1, -1):
theRest = QuadOps.getMultiIndexLevel(level - first, ndim - 1)
for j in range(len(theRest)):
# print(iup,j)
M[iup, 0] = first
M[iup, 1:ndim] = theRest[j, 0 : ndim - 1]
iup += 1
return M
@staticmethod
def compressRule(Q):
# assert self.rule_ is not None, "Must set rule first."
# convert numpy array to list of tuples
xtuple = [tuple(xi) for xi in Q.x]
# create a dictionary
from collections import defaultdict
dd = defaultdict(list)
for ii, xi in enumerate(xtuple):
dd[xi].append(Q.w[ii])
# sum weights over keys
for key in dd:
dd[key] = np.sum(dd[key])
x = np.array(list(dd.keys()))
w = np.array([dd[key] for key in dd])
x = x[np.abs(w) > 1e-12]
w = w[np.abs(w) > 1e-12]
return QuadRule(x, w)
# deprecated for sparse, but good for full tensor product grid
class QuadBuilder:
def __init__(self, grid_type="sparse", order=2, quad_type="Legendre"):
self.grid_type = grid_type
self.quad_type = quad_type
self.order = order
self.ndim = None
self.growth_rule = None
def SetRule(self, ndim):
self.ndim = ndim
if self.grid_type == "full":
self._full()
if self.grid_type == "sparse":
if self.quad_type == "legendre_gauss":
self.growth_rule = 0
self._sparse()
return self
def _full(self):
# cannot do mixed quad yet. Easy if quad type takes in array
quad_gen = QuadFactory.newQuad(self.quad_type)
x, w = quad_gen.get1dQuad(nquad=self.order + 1) # 0th order means 1 point
q1d = QuadRule(x, w)
qnew = q1d.copy()
for i in range(1, self.ndim):
qnew = qnew * q1d
q = qnew.copy()
self.rule_ = q
def _sparse(self):
for nlevel in range(-1, self.order):
self._SetNextLevel2(nlevel)
def _SetNextLevel(self, nlevel):
nlevel += 1
M = QuadOps.getMultiIndexLevel(nlevel, self.ndim)
nM = M.shape[0]
M_npts = np.zeros((nM, self.ndim))
quad_gen = QuadFactory.newQuad(self.quad_type)
for j in range(nM):
Mj = M[j] # jth row of the multiindexlevel
# 1 if Mj == 0, 3 if 1, else (Mj_i+1)^2
if self.growth_rule == 0:
npts = 1 * (Mj == 0) + 3 * (Mj == 1) + ((Mj) ** 2 + 1) * (Mj > 1)
npts_1 = 0 * (Mj == 0) + 1 * (Mj == 1) + ((Mj - 1) ** 2 + 1) * (Mj > 1)
elif self.growth_rule == 1:
npts = ((Mj + 1) ** 2 - 1) * (Mj > 1)
npts_1 = (Mj ** 2 - 1) * (Mj > 1)
npts = npts.astype("int")
npts_1 = npts_1.astype("int")
print(npts, npts_1)
xw = [quad_gen.get1dQuad(nquad=int(n)) for n in list(npts)]
rules = [QuadRule(xwi[0], xwi[1]) for xwi in xw]
xw_1 = [quad_gen.get1dQuad(nquad=int(n_1)) for n_1 in list(npts_1)]
rules_1 = [QuadRule(xwi_1[0], xwi_1[1]) for xwi_1 in xw_1]
srules = []
for ii in range(len(npts)):
if npts_1[ii] > 0:
srules.append(rules[ii] - rules_1[ii])
else:
srules.append(rules[ii])
# multiply rules in srules
r = srules[0].copy()
for ri in srules[1:]:
r = r * ri
if j == 0:
rule_level = r.copy()
else:
rule_level = r + rule_cur
rule_cur = rule_level.copy()
# pdb.set_trace()
if nlevel == 0:
rule_total = rule_level.copy()
else:
rule_total = self.rule_ + rule_level
self.rule_ = rule_total.copy()
return self
def _SetNextLevel2(self, nlevel):
nlevel += 1
M = QuadOps.getMultiIndexLevel(nlevel, self.ndim)
self.M = M
# nM = M.shape[0]
        M_npts = np.zeros(M.shape)
#!/usr/bin/env python
# coding: utf-8
# # Recommendations with IBM
#
# In this notebook, you will be putting your recommendation skills to use on real data from the IBM Watson Studio platform.
#
#
# You may either submit your notebook through the workspace here, or you may work from your local machine and submit through the next page. Either way assure that your code passes the project [RUBRIC](https://review.udacity.com/#!/rubrics/2322/view). **Please save regularly.**
#
# By following the table of contents, you will build out a number of different methods for making recommendations that can be used for different situations.
#
#
# ## Table of Contents
#
# I. [Exploratory Data Analysis](#Exploratory-Data-Analysis)<br>
# II. [Rank Based Recommendations](#Rank)<br>
# III. [User-User Based Collaborative Filtering](#User-User)<br>
# IV. [Content Based Recommendations (EXTRA - NOT REQUIRED)](#Content-Recs)<br>
# V. [Matrix Factorization](#Matrix-Fact)<br>
# VI. [Extras & Concluding](#conclusions)
#
# At the end of the notebook, you will find directions for how to submit your work. Let's get started by importing the necessary libraries and reading in the data.
# In[98]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import project_tests as t
import pickle
get_ipython().run_line_magic('matplotlib', 'inline')
df = pd.read_csv('data/user-item-interactions.csv')
df_content = pd.read_csv('data/articles_community.csv')
del df['Unnamed: 0']
del df_content['Unnamed: 0']
# Show df to get an idea of the data
df.head()
# In[99]:
# Show df_content to get an idea of the data
df_content.head()
# ### <a class="anchor" id="Exploratory-Data-Analysis">Part I : Exploratory Data Analysis</a>
#
# Use the dictionary and cells below to provide some insight into the descriptive statistics of the data.
#
# `1.` What is the distribution of how many articles a user interacts with in the dataset? Provide a visual and descriptive statistics to assist with giving a look at the number of times each user interacts with an article.
# In[ ]:
# In[100]:
users_by_article = df.groupby('article_id')['email'].count()
article_by_users = df.groupby('email')['article_id'].count()
print('USERS BY ARTICLE\n\n ',users_by_article,'\n\n\n\n\nARTICLE BY USERS\n','\n', article_by_users)
# In[101]:
# Fill in the median and maximum number of user_article interactios below
median_val = article_by_users.median()# 50% of individuals interact with ____ number of articles or fewer.
max_views_by_user = article_by_users.max()# The maximum number of user-article interactions by any 1 user is ______.
print("Median : ", median_val,"\nMax Views by User:", max_views_by_user)
# In[102]:
df_content.shape
# In[103]:
# CHANGE IT WITH BETTER VISUALIZATION
interactions_count = df.groupby('email')['article_id'].count().values
plt.figure()
plt.plot(interactions_count)
plt.xlabel('Users')
plt.ylabel('Interaction count')
plt.title('User Interaction with Articles')
plt.show()
# In[104]:
df_content.duplicated(['doc_body', 'doc_description', 'doc_full_name']).sum()
# In[ ]:
# `2.` Explore and remove duplicate articles from the **df_content** dataframe.
# In[105]:
v=df_content.duplicated().sum()
y=df_content.duplicated('article_id').sum()
z=df_content.duplicated('doc_body').sum()
xx= df_content.duplicated(['doc_body', 'doc_description', 'doc_full_name']).sum()
print('Number of full duplicates: ', v)
print('Number of duplicates by id: ', y)
print('Number of duplicates by doc_body: ', z)
print('Number of duplicates by whole content:',xx)
# In[106]:
# Remove any rows that have the same article_id - only keep the first
df_content_clean=df_content.drop_duplicates('article_id',keep='first')
df_content_clean.head()
# `3.` Use the cells below to find:
#
# **a.** The number of unique articles that have an interaction with a user.
# **b.** The number of unique articles in the dataset (whether they have any interactions or not).<br>
# **c.** The number of unique users in the dataset. (excluding null values) <br>
# **d.** The number of user-article interactions in the dataset.
# In[ ]:
# In[107]:
unique_articles=df['article_id'].nunique() # number of unique articles
total_articles=df_content_clean['article_id'].nunique() # number of total unique articles
unique_users=df['email'].nunique() # number of unique users
user_article_interactions=len(df) # number of user-article interactions
# In[108]:
unique_articles, total_articles, unique_users, user_article_interactions
# `4.` Use the cells below to find the most viewed **article_id**, as well as how often it was viewed. After talking to the company leaders, the `email_mapper` function was deemed a reasonable way to map users to ids. There were a small number of null values, and it was found that all of these null values likely belonged to a single user (which is how they are stored using the function below).
# In[ ]:
# In[109]:
most_viewed_article_id = str(df.article_id.value_counts().index.values[0])# The most viewed article in the dataset as a string with one value following the decimal
max_views = df.article_id.value_counts().max()# The most viewed article in the dataset was viewed how many times?
# In[110]:
## No need to change the code here - this will be helpful for later parts of the notebook
# Run this cell to map the user email to a user_id column and remove the email column
def email_mapper():
coded_dict = dict()
cter = 1
email_encoded = []
for val in df['email']:
if val not in coded_dict:
coded_dict[val] = cter
cter+=1
email_encoded.append(coded_dict[val])
return email_encoded
email_encoded = email_mapper()
del df['email']
df['user_id'] = email_encoded
# show header
df.head()
# In[111]:
## If you stored all your results in the variable names above,
## you shouldn't need to change anything in this cell
sol_1_dict = {
'`50% of individuals have _____ or fewer interactions.`': median_val,
'`The total number of user-article interactions in the dataset is ______.`': user_article_interactions,
'`The maximum number of user-article interactions by any 1 user is ______.`': max_views_by_user,
'`The most viewed article in the dataset was viewed _____ times.`': max_views,
'`The article_id of the most viewed article is ______.`': most_viewed_article_id,
'`The number of unique articles that have at least 1 rating ______.`': unique_articles,
'`The number of unique users in the dataset is ______`': unique_users,
'`The number of unique articles on the IBM platform`': total_articles
}
# Test your dictionary against the solution
t.sol_1_test(sol_1_dict)
# ### <a class="anchor" id="Rank">Part II: Rank-Based Recommendations</a>
#
# Unlike in the earlier lessons, we don't actually have ratings for whether a user liked an article or not. We only know that a user has interacted with an article. In these cases, the popularity of an article can really only be based on how often an article was interacted with.
#
# `1.` Fill in the function below to return the **n** top articles ordered with most interactions as the top. Test your function using the tests below.
# In[112]:
df['article_id'] = df['article_id'].astype('str')
df['user_id'] = df['user_id'].astype('int')
# In[113]:
def get_top_articles(n, df=df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
article_ids_ordered= df.groupby('article_id')['user_id'].count().sort_values(ascending=False).index
top_articles=list(df.set_index('article_id')['title'].loc[article_ids_ordered].unique()[:n])
return top_articles # Return the top article titles from df (not df_content)
def get_top_article_ids(n, df=df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
top_articles = list(df.groupby('article_id')['user_id'].count().sort_values(ascending=False).index[:n])
return top_articles # Return the top article ids
# In[114]:
print(get_top_articles(10))
print(get_top_article_ids(10))
# In[115]:
# Test your function by returning the top 5, 10, and 20 articles
top_5 = get_top_articles(5)
top_10 = get_top_articles(10)
top_20 = get_top_articles(20)
# Test each of your three lists from above
t.sol_2_test(get_top_articles)
# ### <a class="anchor" id="User-User">Part III: User-User Based Collaborative Filtering</a>
#
#
# `1.` Use the function below to reformat the **df** dataframe to be shaped with users as the rows and articles as the columns.
#
# * Each **user** should only appear in each **row** once.
#
#
# * Each **article** should only show up in one **column**.
#
#
# * **If a user has interacted with an article, then place a 1 where the user-row meets for that article-column**. It does not matter how many times a user has interacted with the article, all entries where a user has interacted with an article should be a 1.
#
#
# * **If a user has not interacted with an item, then place a zero where the user-row meets for that article-column**.
#
# Use the tests to make sure the basic structure of your matrix matches what is expected by the solution.
# In[116]:
# create the user-article matrix with 1's and 0's
def create_user_item_matrix(df):
'''
INPUT:
df - pandas dataframe with article_id, title, user_id columns
OUTPUT:
user_item - user item matrix
Description:
Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with
an article and a 0 otherwise
'''
# Fill in the function here
user_item=pd.pivot_table(df.drop_duplicates(['user_id', 'article_id']), values='title',
index='user_id', columns='article_id',aggfunc='count', fill_value=0)
return user_item # return the user_item matrix
user_item = create_user_item_matrix(df)
# In[117]:
## Tests: You should just need to run this cell. Don't change the code.
assert user_item.shape[0] == 5149, "Oops! The number of users in the user-article matrix doesn't look right."
assert user_item.shape[1] == 714, "Oops! The number of articles in the user-article matrix doesn't look right."
assert user_item.sum(axis=1)[1] == 36, "Oops! The number of articles seen by user 1 doesn't look right."
print("You have passed our quick tests! Please proceed!")
# `2.` Complete the function below which should take a user_id and provide an ordered list of the most similar users to that user (from most similar to least similar). The returned result should not contain the provided user_id, as we know that each user is similar to him/herself. Because the results for each user here are binary, it (perhaps) makes sense to compute similarity as the dot product of two users.
#
# Use the tests to test your function.
# In[ ]:
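# A quick sanity check of the dot-product idea before writing the function
# (an illustrative extra cell, not required by the rubric): because the matrix
# entries are 0/1, the dot product of two user rows simply counts the articles
# that both users have interacted with.
print("Articles shared by users 1 and 2:", int(user_item.loc[1].dot(user_item.loc[2])))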
# In[118]:
def find_similar_users(user_id, user_item=user_item):
'''
INPUT:
user_id - (int) a user_id
user_item - (pandas dataframe) matrix of users by articles:
1's when a user has interacted with an article, 0 otherwise
OUTPUT:
similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
Description:
Computes the similarity of every pair of users based on the dot product
Returns an ordered
'''
# compute similarity of each user to the provided user
# sort by similarity
# create list of just the ids
# remove the own user's id
# compute similarity of each user to the provided user
similarity = user_item[user_item.index == user_id].dot(user_item.T)
# sort by similarity
# create list of just the ids
most_similar_users = similarity.sort_values(user_id, axis=1, ascending=False).columns.tolist()
# remove the own user's id
most_similar_users.remove(user_id)
return most_similar_users # return a list of the users in order from most to least similar
# In[119]:
# Do a spot check of your function
print("The 10 most similar users to user 1 are: {}".format(find_similar_users(1)[:10]))
print("The 5 most similar users to user 3933 are: {}".format(find_similar_users(3933)[:5]))
print("The 3 most similar users to user 46 are: {}".format(find_similar_users(46)[:3]))
# `3.` Now that you have a function that provides the most similar users to each user, you will want to use these users to find articles you can recommend. Complete the functions below to return the articles you would recommend to each user.
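# The core idea in miniature (hypothetical article ids): recommend whatever a similar
# user has seen that the target user has not.
_seen_by_target = {'10.0', '12.0'}
_seen_by_neighbor = {'10.0', '14.0', '15.0'}
print(_seen_by_neighbor - _seen_by_target)  # candidate recommendations for the target user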
# In[120]:
def get_article_names(article_ids, df=df):
'''
INPUT:
article_ids - (list) a list of article ids
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
article_names - (list) a list of article names associated with the list of article ids
(this is identified by the title column)
'''
# Your code here
article_names = (df.drop_duplicates('article_id').set_index('article_id').loc[article_ids]['title'].tolist())
return article_names # Return the article names associated with list of article ids
def get_user_articles(user_id, user_item=user_item):
'''
INPUT:
user_id - (int) a user id
user_item - (pandas dataframe) matrix of users by articles:
1's when a user has interacted with an article, 0 otherwise
OUTPUT:
article_ids - (list) a list of the article ids seen by the user
article_names - (list) a list of article names associated with the list of article ids
(this is identified by the doc_full_name column in df_content)
Description:
Provides a list of the article_ids and article titles that have been seen by a user
'''
# Your code here
each_user = user_item.loc[user_id]
article_ids = each_user[each_user == 1].index.values
article_names=get_article_names(article_ids)
return article_ids, article_names # return the ids and names
def user_user_recs(user_id, m=10):
'''
INPUT:
user_id - (int) a user id
m - (int) the number of recommendations you want for the user
OUTPUT:
recs - (list) a list of recommendations for the user
Description:
Loops through the users based on closeness to the input user_id
For each user - finds articles the user hasn't seen before and provides them as recs
Does this until m recommendations are found
Notes:
Users who are the same closeness are chosen arbitrarily as the 'next' user
For the user where the number of recommended articles starts below m
and ends exceeding m, the last items are chosen arbitrarily
'''
# Your code here
# loop over the most similar users first
# collect articles the target user hasn't seen yet
# stop once m recommendations have been gathered
# Articles about particular user
similar_users=find_similar_users(user_id)
articles_seen_by_user=get_user_articles(user_id)[0]
# Articles about similar user
recs=set()
for i in similar_users:
articles_seen_by_similar=get_user_articles(i)[0]
articles_will_recommend=set(articles_seen_by_similar)-set(articles_seen_by_user)
recs.update(articles_will_recommend)
if len(recs) >= m:
break
recs=list(recs)[:m]
return recs # return your recommendations for this user_id
# In[121]:
# Check Results
get_article_names(user_user_recs(1, 10)) # Return 10 recommendations for user 1
# In[122]:
# Test your functions here - No need to change this code - just run this cell
assert set(get_article_names(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis']), "Oops! Your get_article_names function doesn't work quite how we expect."
assert set(get_article_names(['1320.0', '232.0', '844.0'])) == set(['housing (2015): united states demographic measures','self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook']), "Oops! Your get_article_names function doesn't work quite how we expect."
assert set(get_user_articles(20)[0]) == set(['1320.0', '232.0', '844.0'])
assert set(get_user_articles(20)[1]) == set(['housing (2015): united states demographic measures', 'self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook'])
assert set(get_user_articles(2)[0]) == set(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])
assert set(get_user_articles(2)[1]) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis'])
print("If this is all you see, you passed all of our tests! Nice job!")
# `4.` Now we are going to improve the consistency of the **user_user_recs** function from above.
#
# * Instead of arbitrarily choosing when we obtain users who are all the same closeness to a given user - choose the users that have the most total article interactions before choosing those with fewer article interactions.
#
#
# * Instead of arbitrarily choosing articles from the user where the number of recommended articles starts below m and ends exceeding m, choose the articles with the most total interactions before choosing those with fewer total interactions. This ranking should be what would be obtained from the **top_articles** function you wrote earlier. (A short sketch of this tie-breaking rule follows below.)
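# A short sketch of the tie-breaking rule with made-up numbers: neighbors with equal
# similarity are ordered by their total interactions, most active first.
import pandas as pd
_toy_neighbors = pd.DataFrame({'neighbor_id': [5, 9, 2],
                               'similarity': [3, 3, 2],
                               'num_interactions': [40, 120, 7]})
print(_toy_neighbors.sort_values(['similarity', 'num_interactions'], ascending=False))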
# In[127]:
def get_top_sorted_users(user_id, df=df, user_item=user_item):
'''
INPUT:
user_id - (int)
df - (pandas dataframe) df as defined at the top of the notebook
user_item - (pandas dataframe) matrix of users by articles:
1's when a user has interacted with an article, 0 otherwise
OUTPUT:
neighbors_df - (pandas dataframe) a dataframe with:
neighbor_id - is a neighbor user_id
similarity - measure of the similarity of each user to the provided user_id
num_interactions - the number of articles viewed by the user
Other Details - sort the neighbors_df by the similarity and then by number of interactions where
highest of each is higher in the dataframe
'''
# Your code here
most_active_users_df=df.groupby('user_id')['article_id'].count().rename('num_interactions').reset_index()
user_similarity=user_item[user_item.index == user_id].dot(user_item.T).loc[user_id].rename('similarity').reset_index()
neighbors_df=user_similarity.merge(most_active_users_df).set_index('user_id')
neighbors_df.index.rename('neighbor_id',inplace=True)
neighbors_df = neighbors_df.drop([user_id])
neighbors_df = neighbors_df.sort_values(['similarity','num_interactions'],ascending=[False,False])
return neighbors_df # Return the dataframe specified in the doc_string
def user_user_recs_part2(user_id, m=10):
'''
INPUT:
user_id - (int) a user id
m - (int) the number of recommendations you want for the user
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
Description:
Loops through the users based on closeness to the input user_id
For each user - finds articles the user hasn't seen before and provides them as recs
Does this until m recommendations are found
Notes:
* Choose the users that have the most total article interactions
before choosing those with fewer article interactions.
* Choose the articles with the most total interactions
before choosing those with fewer total interactions.
'''
# Your code here
top_articles = get_top_article_ids(n=df['article_id'].nunique())  # all article ids ranked by total interactions
articles_seen_by_user=get_user_articles(user_id)[0]
neighbors_df=get_top_sorted_users(user_id)
# Articles about similar user
recs=set()
for i in neighbors_df.index:
articles_seen_by_neighbor=get_user_articles(i)[0]
articles_new_recommend=set(articles_seen_by_neighbor)-set(articles_seen_by_user)
if len(articles_new_recommend) + len(recs) >=m:
articles_new_recommend = [article_id for article_id in top_articles if article_id in articles_new_recommend]
recs.update(articles_new_recommend)
if len(recs) >= m:
break
recs=list(recs)[:m]
rec_names=get_article_names(recs)
return recs, rec_names
# In[128]:
# Quick spot check - don't change this code - just use it to test your functions
rec_ids, rec_names = user_user_recs_part2(20, 10)
print("The top 10 recommendations for user 20 are the following article ids:")
print(rec_ids)
print()
print("The top 10 recommendations for user 20 are the following article names:")
print(rec_names)
# `5.` Use your functions from above to correctly fill in the solutions to the dictionary below. Then test your dictionary against the solution. Provide the code you need to answer each following the comments below.
# In[131]:
### Tests with a dictionary of results
user1_most_sim = get_top_sorted_users(1).index[0]# Find the user that is most similar to user 1
user131_10th_sim = get_top_sorted_users(131).index[9]# Find the 10th most similar user to user 131
# In[133]:
user1_most_sim
# In[134]:
## Dictionary Test Here
sol_5_dict = {
'The user that is most similar to user 1.': user1_most_sim,
'The user that is the 10th most similar to user 131': user131_10th_sim,
}
t.sol_5_test(sol_5_dict)
# `6.` If we were given a new user, which of the above functions would you be able to use to make recommendations? Explain. Can you think of a better way we might make recommendations? Use the cell below to explain a better method for new users.
"""
**Provide your response here.**
Since we have past user data, we can recommend the top 'x' articles with the most user interactions to any new user. The two functions written earlier for the 'top articles'
and 'top article ids' already provide this ranking, so we can use both 'get_top_articles()' and 'get_top_article_ids()' to recommend articles to newcomers.
We could also ask a new user which fields interest them the most. Then we could apply knowledge-based recommendation with that feedback
and provide the user with better recommendations.
"""
# `7.` Using your existing functions, provide the top 10 recommended articles you would provide for the a new user below. You can test your function against our thoughts to make sure we are all on the same page with how we might make a recommendation.
# In[135]:
new_user = '0.0'
# What would your recommendations be for this new user '0.0'? As a new user, they have no observed articles.
# Provide a list of the top 10 article ids you would give to this user.
new_user_recs = get_top_articles(n=10)# Your recommendations here
new_user_recs_ids=get_top_article_ids(n=10)
# In[137]:
new_user_recs
# In[138]:
assert set(new_user_recs_ids) == set(['1314.0','1429.0','1293.0','1427.0','1162.0','1364.0','1304.0','1170.0','1431.0','1330.0']), "Oops! It makes sense that in this case we would want to recommend the most popular articles, because we don't know anything about these users."
print("That's right! Nice job!")
# ### <a class="anchor" id="Content-Recs">Part IV: Content Based Recommendations (EXTRA - NOT REQUIRED)</a>
#
# Another method we might use to make recommendations is to perform a ranking of the highest ranked articles associated with some term. You might consider content to be the **doc_body**, **doc_description**, or **doc_full_name**. There isn't one way to create a content based recommendation, especially considering that each of these columns hold content related information.
#
# `1.` Use the function body below to create a content based recommender. Since there isn't one right answer for this recommendation tactic, no test functions are provided. Feel free to change the function inputs if you decide you want to try a method that requires more input values. The input values are currently set with one idea in mind that you may use to make content based recommendations. One additional idea is that you might want to choose the most popular recommendations that meet your 'content criteria', but again, there is a lot of flexibility in how you might make these recommendations.
#
# ### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.
# In[ ]:
def make_content_recs():
'''
INPUT:
OUTPUT:
'''
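# One possible (NOT REQUIRED) content-based sketch. It assumes the df_content dataframe
# and its doc_description column from the top of the notebook are available; the helper
# name and the use of scikit-learn here are illustrative choices, not the project solution.
def sketch_content_recs(article_row_idx, df_content, m=10):
    '''Return row indices of the m articles whose descriptions are most similar (TF-IDF cosine).'''
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.metrics.pairwise import cosine_similarity
    tfidf = TfidfVectorizer(stop_words='english')
    doc_vecs = tfidf.fit_transform(df_content['doc_description'].fillna(''))
    sims = cosine_similarity(doc_vecs[article_row_idx], doc_vecs).ravel()
    return sims.argsort()[::-1][1:m + 1]  # skip the article itself at position 0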
# `2.` Now that you have put together your content-based recommendation system, use the cell below to write a summary explaining how your content based recommender works. Do you see any possible improvements that could be made to your function? Is there anything novel about your content based recommender?
#
# ### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.
# **Write an explanation of your content based recommendation system here.**
# `3.` Use your content-recommendation system to make recommendations for the below scenarios based on the comments. Again no tests are provided here, because there isn't one right answer that could be used to find these content based recommendations.
#
# ### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.
# In[ ]:
# make recommendations for a brand new user
# make a recommendations for a user who only has interacted with article id '1427.0'
# ### <a class="anchor" id="Matrix-Fact">Part V: Matrix Factorization</a>
#
# In this part of the notebook, you will use matrix factorization to make article recommendations to the users on the IBM Watson Studio platform.
#
# `1.` You should have already created a **user_item** matrix above in **question 1** of **Part III** above. This first question here will just require that you run the cells to get things set up for the rest of **Part V** of the notebook.
# In[139]:
# Load the matrix here
user_item_matrix = pd.read_pickle('user_item_matrix.p')
# In[140]:
# quick look at the matrix
user_item_matrix.head()
# `2.` In this situation, you can use Singular Value Decomposition from [numpy](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.svd.html) on the user-item matrix. Use the cell to perform SVD, and explain why this is different than in the lesson.
# In[141]:
# Perform SVD on the User-Item Matrix Here
u, s, vt = np.linalg.svd(user_item_matrix) #use the built in to get the three matrices
# In[143]:
u.shape, s.shape, vt.shape
"""
**Provide your response here.**
In the lessons we learned about SVD and FunkSVD. The most important difference between them is that traditional SVD cannot be used when the matrix contains NaN values.
In this IBM Watson project the user_item_matrix has no NaN values - it contains only 0 and 1 values - so we can use traditional SVD here instead of FunkSVD.
"""
# `3.` Now for the tricky part, how do we choose the number of latent features to use? Running the below cell, you can see that as the number of latent features increases, we obtain a lower error rate on making predictions for the 1 and 0 values in the user-item matrix. Run the cell below to get an idea of how the accuracy improves as we increase the number of latent features.
# In[144]:
num_latent_feats = np.arange(10,700+10,20)
sum_errs = []
for k in num_latent_feats:
# restructure with k latent features
s_new, u_new, vt_new = np.diag(s[:k]), u[:, :k], vt[:k, :]
# take dot product
user_item_est = np.around(np.dot(np.dot(u_new, s_new), vt_new))
# compute error for each prediction to actual value
diffs = np.subtract(user_item_matrix, user_item_est)
# total errors and keep track of them
err = np.sum(np.sum(np.abs(diffs)))
sum_errs.append(err)
plt.plot(num_latent_feats, 1 - np.array(sum_errs)/df.shape[0]);
plt.xlabel('Number of Latent Features');
plt.ylabel('Accuracy');
plt.title('Accuracy vs. Number of Latent Features');
# `4.` From the above, we can't really be sure how many features to use, because simply having a better way to predict the 1's and 0's of the matrix doesn't exactly give us an indication of if we are able to make good recommendations. Instead, we might split our dataset into a training and test set of data, as shown in the cell below.
#
# Use the code from question 3 to understand the impact on accuracy of the training and test sets of data with different numbers of latent features. Using the split below:
#
# * How many users can we make predictions for in the test set?
# * How many users are we not able to make predictions for because of the cold start problem?
# * How many articles can we make predictions for in the test set?
# * How many articles are we not able to make predictions for because of the cold start problem?
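# The cold start bookkeeping in miniature (made-up ids): anything in the test split that
# never appears in the training split cannot be predicted by the factorization.
_train_ids, _test_ids = {1, 2, 3}, {3, 4}
print(len(_test_ids & _train_ids), "test ids can be predicted")
print(len(_test_ids - _train_ids), "test ids are cold-start")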
# In[145]:
df.head()
# In[146]:
df_train = df.head(40000)
df_test = df.tail(5993)
def create_test_and_train_user_item(df_train, df_test):
'''
INPUT:
df_train - training dataframe
df_test - test dataframe
OUTPUT:
user_item_train - a user-item matrix of the training dataframe
(unique users for each row and unique articles for each column)
user_item_test - a user-item matrix of the testing dataframe
(unique users for each row and unique articles for each column)
test_idx - all of the test user ids
test_arts - all of the test article ids
'''
# Your code here
user_item_train = create_user_item_matrix(df_train)
user_item_test = create_user_item_matrix(df_test)
test_idx = user_item_test.index.tolist()
test_arts = user_item_test.columns.tolist()
return user_item_train, user_item_test, test_idx, test_arts
user_item_train, user_item_test, test_idx, test_arts = create_test_and_train_user_item(df_train, df_test)
# In[147]:
test_non_predicted_users = set(test_idx) - set(df_train['user_id'])
test_predicted_users= set(user_item_test.index)-test_non_predicted_users
test_non_predicted_articles=set(test_arts)-set(df_train['article_id'])
test_predicted_articles= set(user_item_test.columns)-test_non_predicted_articles
# In[148]:
len(test_non_predicted_users), len(test_predicted_users), len(test_non_predicted_articles), len(test_predicted_articles)
# In[150]:
# Replace the values in the dictionary below
a = 662
b = 574
c = 20
d = 0
sol_4_dict = {
'How many users can we make predictions for in the test set?': c,# letter here,
'How many users in the test set are we not able to make predictions for because of the cold start problem?': a,# letter here,
'How many movies can we make predictions for in the test set?': b,# letter here,
'How many movies in the test set are we not able to make predictions for because of the cold start problem?':d # letter here
}
t.sol_4_test(sol_4_dict)
# `5.` Now use the **user_item_train** dataset from above to find U, S, and V transpose using SVD. Then find the subset of rows in the **user_item_test** dataset that you can predict using this matrix decomposition with different numbers of latent features to see how many features makes sense to keep based on the accuracy on the test data. This will require combining what was done in questions `2` - `4`.
#
# Use the cells below to explore how well SVD works towards making predictions for recommendations on the test data.
# In[151]:
# fit SVD on the user_item_train matrix
u_train, s_train, vt_train = np.linalg.svd(user_item_train)# fit svd similar to above then use the cells below
# In[ ]:
# Use these cells to see how well you can use the training
# decomposition to predict on test data
# In[152]:
# Use u_train with row idx because of user impact
test_rows_idx = user_item_train.index.isin(test_idx)
u_test = u_train[test_rows_idx, :]
# In[153]:
# use vt_train with col idx because of article impact
test_col_idxs = user_item_train.columns.isin(test_arts)
vt_test = vt_train[:, test_col_idxs]
# In[154]:
user_present_both = np.intersect1d(user_item_test.index, user_item_train.index)
user_item_test_predictable = user_item_test[user_item_test.index.isin(user_present_both)]
# initialize testing parameters
num_latent_feats = np.arange(10,700+10,20)
sum_errs_train = []
sum_errs_test = []
for k in num_latent_feats:
# restructure with k latent features for both training and test sets
s_train_lat, u_train_lat, vt_train_lat = np.diag(s_train[:k]), u_train[:, :k], vt_train[:k, :]
u_test_lat, vt_test_lat = u_test[:, :k], vt_test[:k, :]
# take dot product for both training and test sets
user_item_train_est = np.around(np.dot(np.dot(u_train_lat, s_train_lat), vt_train_lat))
user_item_test_est = np.around(np.dot(np.dot(u_test_lat, s_train_lat), vt_test_lat))
# compute error for each prediction to actual value
diffs_train = np.subtract(user_item_train, user_item_train_est)
diffs_test = np.subtract(user_item_test_predictable, user_item_test_est)
# total errors and keep track of them for both training and test sets
err_train = np.sum(np.sum(np.abs(diffs_train)))
err_test = np.sum(np.sum(np.abs(diffs_test)))
sum_errs_train.append(err_train)
sum_errs_test.append(err_test)
import numpy as np
from scipy.stats import norm
from cmfsapy.dimension.correction import compute_mFSA_correction_coef, correct_estimates, polynom_func
import time
from constants import *
from scipy.stats import normaltest, shapiro
from statsmodels.stats.multitest import multipletests
t0 = time.time()
figsave_path = './'
#Load data
calibration_res = dict(np.load('./calibration_result/calibration_data_krange20_n2500_d80.npz'))
k = calibration_res['k']
D = calibration_res['d']
d = calibration_res['dims']
t1 = time.time()
E = D / d
print("k: ", k)
# start correction
K = 5
# powers = [-4, -3, -2, -1, 1, 2, 3]
# powers = [-2, -1, 1, 2]
powers = [-1, 1, 2, 3]
# powers = np.arange(0, 3, 0.5)
# powers = [-1, .5, 1, 2, 3]
# powers = [1]
coefs = compute_mFSA_correction_coef(d[:, :, K], E[:, :, K], powers)
cd = correct_estimates(d[:, :, K], coefs, powers)
print("coeficients:", coefs)
# np.save('coefs', coefs)
# np.save('powers', powers)
errors = cd-D[:, :, 0]
# computing empirical error probabilities
P_correct = norm.cdf(0.5, loc=errors.mean(axis=1), scale=errors.std(axis=1))\
- norm.cdf(-0.5, loc=errors.mean(axis=1), scale=errors.std(axis=1))
P_error = 1 - P_correct
P1 = P_error - norm.cdf(-1.5, loc=errors.mean(axis=1), scale=errors.std(axis=1)) \
- (1- norm.cdf(1.5, loc=errors.mean(axis=1), scale=errors.std(axis=1)))
P2 = P_error - P1- norm.cdf(-2.5, loc=errors.mean(axis=1), scale=errors.std(axis=1)) \
- (1- norm.cdf(2.5, loc=errors.mean(axis=1), scale=errors.std(axis=1)))
P3 = P_error - P1 - P2
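# Standalone sanity check of what P_correct measures (toy numbers, not the fitted errors
# above): for a zero-mean error with std 0.4, the normal mass between -0.5 and 0.5 is the
# probability that the rounded dimension estimate is exactly right.
_p_exact = norm.cdf(0.5, loc=0.0, scale=0.4) - norm.cdf(-0.5, loc=0.0, scale=0.4)
print("P(correct integer estimate) ~ %.3f" % _p_exact)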
fig = plt.figure(figsize=(9, 12))
plt.subplot(321)
plt.plot(D[:, 0, 0], correct_estimates(d[:, :, K], coefs, powers).round(), 'r.', alpha=0.01, ms=10)
plt.plot(D[:, 0, 0], correct_estimates(d[:, :, K], coefs, powers).round().mean(axis=1), '-', color='gold')
plt.plot(D[:, 0, 0], d[:, :, K], 'b.', alpha=0.01, ms=10)
plt.plot(D[:, 0, 0], D[:, 0, 0], 'k--')
plt.xlim([0, 80])
plt.ylim([0, 80])
plt.xlabel(r'$D$')
plt.ylabel(r'$\hat{d}$')
plt.subplot(322)
_ = plt.plot(d[:, :, K], np.log(E[:, :, K]), 'b.', ms=2)
_ = plt.plot(d[:, :, K].mean(axis=1), np.log(E[:, :, K]).mean(axis=1), '.', ms=5, color='gold')
x = np.arange(1, 48).astype(float)
y = x * np.exp(polynom_func(coefs, x, powers))
_ = plt.plot(x, np.log(y/x))
"""The lattice module define the class to handle 3D crystal lattices (the 14 Bravais lattices).
"""
import os
from pymicro.external import CifFile_module as CifFile
import enum
import functools
import math
import numpy as np
from numpy import pi, dot, transpose, radians
from matplotlib import pyplot as plt
class Crystal:
'''
The Crystal class to create any particular crystal structure.
A crystal instance is composed by:
* one of the 14 Bravais lattice
* a point basis (or motif)
'''
def __init__(self, lattice, basis=None, basis_labels=None, basis_sizes=None, basis_colors=None):
'''
Create a Crystal instance with the given lattice and basis.
This create a new instance of a Crystal object. The given lattice
is assigned to the crystal. If the basis is not specified, it will
be one atom at (0., 0., 0.).
:param lattice: the :py:class:`~pymicro.crystal.lattice.Lattice` instance of the crystal.
:param list basis: A list of tuples containing the position of the atoms in the motif.
:param list basis_labels: A list of strings containing the description of the atoms in the motif.
:param list basis_sizes: A list of floats between 0. and 1. (default 0.1) to scale the atoms in the motif.
:param list basis_colors: A list of vtk colors of the atoms in the motif.
'''
self._lattice = lattice
if basis == None:
# default to one atom at (0, 0, 0)
self._basis = [(0., 0., 0.)]
self._labels = ['?']
self._sizes = [0.1]
self._colors = [(0., 0., 1.)]
else:
self._basis = basis
self._labels = basis_labels
self._sizes = basis_sizes
self._colors = basis_colors
class CrystallinePhase:
def __init__(self, phase_id=1, name='unknown', lattice=None):
"""Create a new crystalline phase.
The `phase_id` attribute is used to identify the phase in data sets
where it can be referred to in phase_map for instance."""
self.phase_id = phase_id
self.name = name
self.description = ''
self.formula = ''
if lattice is None:
lattice = Lattice.cubic(1.0)
self.set_lattice(lattice)
# a list of C_IJ values
self.elastic_constants = []
def __repr__(self):
"""Generate a string representation of this instance."""
out = 'Phase %d (%s) \n\t-- ' % (self.phase_id, self.name)
out += self.get_lattice().__repr__()
if self.elastic_constants:
out += '\n\t-- elastic constants: %s' % self.elastic_constants
return out
def get_lattice(self):
"""Returns the crystal lattice."""
return self._lattice
def set_lattice(self, lattice):
"""Set the crystal lattice.
:param Lattice lattice: the crystal lattice.
"""
self._lattice = lattice
def get_symmetry(self):
"""Returns the type of `Symmetry` of the Lattice."""
return self.get_lattice().get_symmetry()
def to_dict(self):
d = {'phase_id': self.phase_id,
'name': self.name,
'description': self.description,
'formula': self.formula,
'symmetry': self.get_symmetry().to_string(),
'lattice_parameters': self.get_lattice().get_lattice_parameters(),
'lattice_parameters_unit': 'nm',
'elastic_constants': self.elastic_constants,
'elastic_constants_unit': 'MPa'
}
#print(d)
return d
@staticmethod
def from_dict(d):
sym = Symmetry.from_string(d['symmetry'])
lattice = Lattice.from_symmetry(sym, d['lattice_parameters'])
phase = CrystallinePhase(d['phase_id'], d['name'], lattice)
phase.description = d['description']
phase.formula = d['formula']
phase.elastic_constants = d['elastic_constants']
return phase
class Symmetry(enum.Enum):
"""
Class to describe crystal symmetry defined by its Laue class symbol.
"""
cubic = 'm3m'
hexagonal = '6/mmm'
orthorhombic = 'mmm'
tetragonal = '4/mmm'
trigonal = 'bar3m'
monoclinic = '2/m'
triclinic = 'bar1'
@staticmethod
def from_string(s):
if s == 'cubic':
return Symmetry.cubic
elif s == 'hexagonal':
return Symmetry.hexagonal
elif s == 'orthorhombic':
return Symmetry.orthorhombic
elif s == 'tetragonal':
return Symmetry.tetragonal
elif s == 'trigonal':
return Symmetry.trigonal
elif s == 'monoclinic':
return Symmetry.monoclinic
elif s == 'triclinic':
return Symmetry.triclinic
else:
return None
def to_string(self):
if self is Symmetry.cubic:
return 'cubic'
elif self is Symmetry.hexagonal:
return 'hexagonal'
elif self is Symmetry.orthorhombic:
return 'orthorhombic'
elif self is Symmetry.tetragonal:
return 'tetragonal'
elif self is Symmetry.trigonal:
return 'trigonal'
elif self is Symmetry.monoclinic:
return 'monoclinic'
elif self is Symmetry.triclinic:
return 'triclinic'
else:
return None
@staticmethod
def from_space_group(space_group_number):
"""Create an instance of the `Symmetry` class from a TSL symmetry
number.
:raise ValueError: if the space_group_number is not between 1 and 230.
:param int space_group_number: the number asociated with the
space group (between 1 and 230).
:return: an instance of the `Symmetry` class
"""
if space_group_number < 1 or space_group_number > 230:
raise ValueError('space_group_number must be between 1 and 230')
if space_group_number <= 2:
return Symmetry.triclinic
elif space_group_number <= 15:
return Symmetry.monoclinic
elif space_group_number <= 74:
return Symmetry.orthorhombic
elif space_group_number <= 142:
return Symmetry.tetragonal
elif space_group_number <= 167:
return Symmetry.trigonal
elif space_group_number <= 194:
return Symmetry.hexagonal
else:
return Symmetry.cubic
@staticmethod
def from_tsl(tsl_number):
"""Create an instance of the `Symmetry` class from a TSL symmetry
number.
:return: an instance of the `Symmetry` class
"""
if tsl_number == 43:
return Symmetry.cubic
elif tsl_number == 62:
return Symmetry.hexagonal
elif tsl_number == 22:
return Symmetry.orthorhombic
elif tsl_number == 42:
return Symmetry.tetragonal
elif tsl_number == 32:
return Symmetry.trigonal
elif tsl_number == 2:
return Symmetry.monoclinic
elif tsl_number == 1:
return Symmetry.triclinic
else:
return None
def symmetry_operators(self, use_miller_bravais=False):
"""Define the equivalent crystal symmetries.
Those come from Randle & Engler, 2000. For instance, in the cubic
crystal structure there are 24 equivalent cube orientations.
:returns array: A numpy array of shape (n, 3, 3) where n is the \
number of symmetries of the given crystal structure.
"""
if self is Symmetry.cubic:
sym = np.zeros((24, 3, 3), dtype=np.float)
sym[0] = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
sym[1] = np.array([[0., 0., -1.], [0., -1., 0.], [-1., 0., 0.]])
sym[2] = np.array([[0., 0., -1.], [0., 1., 0.], [1., 0., 0.]])
sym[3] = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
sym[4] = np.array([[0., 0., 1.], [0., 1., 0.], [-1., 0., 0.]])
sym[5] = np.array([[1., 0., 0.], [0., 0., -1.], [0., 1., 0.]])
sym[6] = np.array([[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]])
sym[7] = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]])
sym[8] = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
sym[9] = np.array([[-1., 0., 0.], [0., -1., 0.], [0., 0., 1.]])
sym[10] = np.array([[0., 1., 0.], [-1., 0., 0.], [0., 0., 1.]])
sym[11] = np.array([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
sym[12] = np.array([[0., 1., 0.], [0., 0., 1.], [1., 0., 0.]])
sym[13] = np.array([[0., 0., -1.], [-1., 0., 0.], [0., 1., 0.]])
sym[14] = np.array([[0., -1., 0.], [0., 0., 1.], [-1., 0., 0.]])
sym[15] = np.array([[0., 1., 0.], [0., 0., -1.], [-1., 0., 0.]])
sym[16] = np.array([[0., 0., -1.], [1., 0., 0.], [0., -1., 0.]])
sym[17] = np.array([[0., 0., 1.], [-1., 0., 0.], [0., -1., 0.]])
sym[18] = np.array([[0., -1., 0.], [0., 0., -1.], [1., 0., 0.]])
sym[19] = np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1.]])
sym[20] = np.array([[-1., 0., 0.], [0., 0., 1.], [0., 1., 0.]])
sym[21] = np.array([[0., 0., 1.], [0., -1., 0.], [1., 0., 0.]])
sym[22] = np.array([[0., -1., 0.], [-1., 0., 0.], [0., 0., -1.]])
sym[23] = np.array([[-1., 0., 0.], [0., 0., -1.], [0., -1., 0.]])
elif self is Symmetry.hexagonal:
if use_miller_bravais:
# using the Miller-Bravais representation here
sym = np.zeros((12, 4, 4), dtype=np.int)
sym[0] = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
sym[1] = np.array([[0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
sym[2] = np.array([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]])
sym[3] = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])
sym[4] = np.array([[0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, -1]])
sym[5] = np.array([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, -1]])
sym[6] = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
sym[7] = np.array([[0, 0, -1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]])
sym[8] = np.array([[0, -1, 0, 0], [0, 0, -1, 0], [-1, 0, 0, 0], [0, 0, 0, 1]])
sym[9] = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]])
sym[10] = np.array([[0, 0, -1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, -1]])
sym[11] = np.array([[0, -1, 0, 0], [0, 0, -1, 0], [-1, 0, 0, 0], [0, 0, 0, -1]])
else:
sym = np.zeros((12, 3, 3), dtype=np.float)
s60 = np.sin(60 * np.pi / 180)
sym[0] = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
sym[1] = np.array([[0.5, s60, 0.], [-s60, 0.5, 0.], [0., 0., 1.]])
sym[2] = np.array([[-0.5, s60, 0.], [-s60, -0.5, 0.], [0., 0., 1.]])
sym[3] = np.array([[-1., 0., 0.], [0., -1., 0.], [0., 0., 1.]])
sym[4] = np.array([[-0.5, -s60, 0.], [s60, -0.5, 0.], [0., 0., 1.]])
sym[5] = np.array([[0.5, -s60, 0.], [s60, 0.5, 0.], [0., 0., 1.]])
sym[6] = np.array([[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]])
sym[7] = np.array([[0.5, s60, 0.], [s60, -0.5, 0.], [0., 0., -1.]])
sym[8] = np.array([[-0.5, s60, 0.], [s60, 0.5, 0.], [0., 0., -1.]])
sym[9] = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
sym[10] = np.array([[-0.5, -s60, 0.], [-s60, 0.5, 0.], [0., 0., -1.]])
sym[11] = np.array([[0.5, -s60, 0.], [-s60, -0.5, 0.], [0., 0., -1.]])
elif self is Symmetry.orthorhombic:
sym = np.zeros((4, 3, 3), dtype=np.float)
sym[0] = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
sym[1] = np.array([[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]])
sym[2] = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
sym[3] = np.array([[-1., 0., 0.], [0., -1., 0.], [0., 0., 1.]])
elif self is Symmetry.tetragonal:
sym = np.zeros((8, 3, 3), dtype=np.float)
sym[0] = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
sym[1] = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
sym[2] = np.array([[-1., 0., 0.], [0., -1., 0.], [0., 0., 1.]])
sym[3] = np.array([[0., 1., 0.], [-1., 0., 0.], [0., 0., 1.]])
sym[4] = np.array([[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]])
sym[5] = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
sym[6] = np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1.]])
sym[7] = np.array([[0., -1., 0.], [-1., 0., 0.], [0., 0., -1.]])
elif self is Symmetry.triclinic:
sym = np.zeros((1, 3, 3), dtype=np.float)
sym[0] = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
else:
raise ValueError('warning, symmetry not supported: %s' % self)
return sym
def move_vector_to_FZ(self, v):
"""
Move the vector to the Fundamental Zone of a given `Symmetry` instance.
:param v: a 3 components vector.
:return: a new 3 components vector in the fundamental zone.
"""
omegas = [] # list to store all the rotation angles
syms = self.symmetry_operators()
for sym in syms:
# apply symmetry to the vector and compute the corresponding angle
v_sym = np.dot(sym, v)
omega = 2 * np.arctan(np.linalg.norm(v_sym)) * 180 / np.pi
omegas.append(omega)
# the fundamental zone corresponds to the minimum angle
index = np.argmin(omegas)
return np.dot(syms[index], v)
def move_rotation_to_FZ(self, g, verbose=False):
"""Compute the rotation matrix in the Fundamental Zone of a given
`Symmetry` instance.
:param g: a 3x3 matrix representing the rotation.
:param verbose: flag for verbose mode.
:return: a new 3x3 matrix for the rotation in the fundamental zone.
"""
omegas = [] # list to store all the rotation angles
syms = self.symmetry_operators()
for sym in syms:
# apply the symmetry operator
om = np.dot(sym, g)
if verbose:
print(om)
print(om.trace())
# compute the Rodrigues vector of the corresponding orientation matrix
# from pymicro.crystal.microstructure import Orientation
# r = Orientation.OrientationMatrix2Rodrigues(om)
# print(r)
# and then the rotation angle
# omega = 2 * np.arctan(np.linalg.norm(r)) * 180 / np.pi
# todo: check if we can avoid computing the R vector
cw = 0.5 * (om.trace() - 1)
omega = np.arccos(cw)
omegas.append(omega)
index = np.argmin(omegas)
if verbose:
print(omegas)
print('moving to FZ, index = %d' % index)
return np.dot(syms[index], g)
def stiffness_matrix(self, elastic_constants):
"""Build the stiffness matrix for this symmetry using Voigt convention.
:param list elastic_constants: the elastic constants (the number must
correspond to the type of symmetry, eg 3 for cubic).
:return ndarray: a numpy array of shape (6, 6) representing
the stiffness matrix.
"""
if self is Symmetry.cubic:
if len(elastic_constants) != 3:
raise ValueError('Error: need 3 elastic constants for cubic '
'symmetry, got %d' % len(elastic_constants))
C11, C12, C44 = elastic_constants
C = np.array([[C11, C12, C12, 0, 0, 0],
[C12, C11, C12, 0, 0, 0],
[C12, C12, C11, 0, 0, 0],
[ 0, 0, 0, C44, 0, 0],
[ 0, 0, 0, 0, C44, 0],
[ 0, 0, 0, 0, 0, C44]])
return C
elif self is Symmetry.hexagonal:
if len(elastic_constants) != 5:
raise ValueError('Error: need 5 elastic constants for hexagonal '
'symmetry, got %d' % len(elastic_constants))
C11, C12, C13, C33, C44 = elastic_constants
C66 = (C11 - C12) / 2
C = np.array([[C11, C12, C13, 0, 0, 0],
[C12, C11, C13, 0, 0, 0],
[C13, C13, C33, 0, 0, 0],
[ 0, 0, 0, C44, 0, 0],
[ 0, 0, 0, 0, C44, 0],
[ 0, 0, 0, 0, 0, C66]])
return C
elif self is Symmetry.tetragonal:
if len(elastic_constants) != 6:
raise ValueError('Error: need 6 elastic constants for tetragonal '
'symmetry, got %d' % len(elastic_constants))
C11, C12, C13, C33, C44, C66 = elastic_constants
C = np.array([[C11, C12, C13, 0, 0, 0],
[C12, C11, C13, 0, 0, 0],
[C13, C13, C33, 0, 0, 0],
[ 0, 0, 0, C44, 0, 0],
[ 0, 0, 0, 0, C44, 0],
[ 0, 0, 0, 0, 0, C66]])
return C
elif self is Symmetry.orthorhombic:
if len(elastic_constants) != 9:
raise ValueError('Error: need 9 elastic constants for orthorhombic '
'symmetry, got %d' % len(elastic_constants))
C11, C12, C13, C22, C23, C33, C44, C55, C66 = elastic_constants
C = np.array([[C11, C12, C13, 0, 0, 0],
[C12, C22, C23, 0, 0, 0],
[C13, C23, C33, 0, 0, 0],
[ 0, 0, 0, C44, 0, 0],
[ 0, 0, 0, 0, C55, 0],
[ 0, 0, 0, 0, 0, C66]])
return C
elif self is Symmetry.monoclinic:
if len(elastic_constants) != 13:
raise ValueError('Error: need 13 elastic constants for monoclinic '
'symmetry, got %d' % len(elastic_constants))
C11, C12, C13, C16, C22, C23, C26, C33, C36, C44, C45, \
C55, C66 = elastic_constants
C = np.array([[C11, C12, C13, 0, 0, C16],
[C12, C22, C23, 0, 0, C26],
[C13, C23, C33, 0, 0, C36],
[ 0, 0, 0, C44, C45, 0],
[ 0, 0, 0, C45, C55, 0],
[C16, C26, C36, 0, 0, C66]])
return C
elif self is Symmetry.triclinic:
if len(elastic_constants) != 21:
raise ValueError('Error: need 21 elastic constants for triclinic '
'symmetry, got %d' % len(elastic_constants))
C11, C12, C13, C14, C15, C16, C22, C23, C24, C25, C26, C33, \
C34, C35, C36, C44, C45, C46, C55, C56, C66 = elastic_constants
C = np.array([[C11, C12, C13, C14, C15, C16],
[C12, C22, C23, C24, C25, C26],
[C13, C23, C33, C34, C35, C36],
[C14, C24, C34, C44, C45, C46],
[C15, C25, C35, C45, C55, C56],
[C16, C26, C36, C46, C56, C66]])
return C
else:
raise ValueError('warning, symmetry not supported: %s' % self)
@staticmethod
def orthotropic_constants_from_stiffness(C):
"""Return orthotropic elastic constants from stiffness matrix.
:param ndarray C: a numpy array of shape (6, 6) representing
the stiffness matrix.
:return dict OrthoElas: Dict of orthotropic elastic constants
corresponding to the input stiffness matrix. Keys are
'E1','E2','E3','nu12','nu13','nu23','G12','G13','G23'
"""
# compute the compliance matrix
S = np.linalg.inv(C)
# compute orthotropic elastic constants
OrthoElas = dict()
OrthoElas['E1'] = 1 / S[0, 0]
OrthoElas['E2'] = 1 / S[1, 1]
OrthoElas['E3'] = 1 / S[2, 2]
OrthoElas['Nu12'] = -OrthoElas['E1'] * S[1, 0]
OrthoElas['Nu13'] = -OrthoElas['E1'] * S[2, 0]
OrthoElas['Nu23'] = -OrthoElas['E2'] * S[2, 1]
OrthoElas['G12'] = 1 / S[5, 5]
OrthoElas['G13'] = 1 / S[4, 4]
OrthoElas['G23'] = 1 / S[3, 3]
# return a dictionary populated with the relevant values
return OrthoElas
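# Usage sketch for the stiffness helpers above (the numeric constants are illustrative,
# roughly nickel-like values in MPa, not data shipped with this module):
#
#   C = Symmetry.cubic.stiffness_matrix([246.5e3, 147.3e3, 124.7e3])
#   elas = Symmetry.orthotropic_constants_from_stiffness(C)
#   elas['E1'], elas['G12']  # Young's and shear moduli recovered from the compliance matrix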
class Lattice:
"""
The Lattice class to create one of the 14 Bravais lattices.
This particular class has been partly inspired from the pymatgen
project at https://github.com/materialsproject/pymatgen
Any of the 7 lattice systems (each corresponding to one point group)
can be easily created and manipulated.
The lattice centering can be specified to form any of the 14 Bravais
lattices:
* Primitive (P): lattice points on the cell corners only (default);
* Body (I): one additional lattice point at the center of the cell;
* Face (F): one additional lattice point at the center of each of
the faces of the cell;
* Base (A, B or C): one additional lattice point at the center of
each of one pair of the cell faces.
::
a = 0.352 # FCC Nickel
l = Lattice.face_centered_cubic(a)
print(l.volume())
Additionally the point basis can be controlled to address non
Bravais lattice cells. It is set to a single atom at (0, 0, 0) by
default so that each cell is a Bravais lattice, but it may be changed to
something more complex to achieve an HCP structure or diamond structure
for instance.
"""
def __init__(self, matrix, centering='P', symmetry=None):
"""Create a crystal lattice (unit cell).
Create a lattice from a 3x3 matrix. Each row in the matrix represents
one lattice vector. The unit is nm.
:param ndarray matrix: the 3x3 matrix representing the crystal lattice.
:param str centering:
"""
m = np.array(matrix, dtype=np.float64).reshape((3, 3))
lengths = np.sqrt(np.sum(m ** 2, axis=1))
angles = np.zeros(3)
for i in range(3):
j = (i + 1) % 3
k = (i + 2) % 3
angles[i] = dot(m[j], m[k]) / (lengths[j] * lengths[k])
angles = np.arccos(angles) * 180. / pi
self._angles = angles
self._lengths = lengths
self._matrix = m
self._centering = centering
self._symmetry = symmetry
def __eq__(self, other):
"""Override the default Equals behavior.
The equality of two Lattice objects is based on the equality of their
angles, lengths, centering, and symmetry.
:param other: the other `Lattice` instance to test.
:return: True if the two lattice are equals False if not.
"""
if not isinstance(other, self.__class__):
return False
for i in range(3):
if self._angles[i] != other._angles[i]:
return False
elif self._lengths[i] != other._lengths[i]:
return False
if self._centering != other._centering:
return False
if self._symmetry != other._symmetry:
return False
return True
def __repr__(self):
"""Gives a string representation of this instance of the Lattice class."""
a, b, c = self._lengths
alpha, beta, gamma = self._angles
out = 'Lattice (%s)' % self._symmetry
out += ' a=%.3f, b=%.3f, c=%.3f' % (a, b, c)
out += ' alpha=%.1f, beta=%.1f, gamma=%.1f' % (alpha, beta, gamma)
return out
def reciprocal_lattice(self):
'''Compute the reciprocal lattice.
The reciprocal lattice defines a crystal in terms of vectors that
are normal to a plane and whose lengths are the inverse of the
interplanar spacing. This method computes the three reciprocal
lattice vectors defined by:
.. math::

   a \cdot a^* = 1, \quad b \cdot b^* = 1, \quad c \cdot c^* = 1
'''
[a, b, c] = self._matrix
V = self.volume()
astar = np.cross(b, c) / V
bstar = np.cross(c, a) / V
cstar = np.cross(a, b) / V
return [astar, bstar, cstar]
@property
def matrix(self):
"""Returns a copy of matrix representing the Lattice."""
return np.copy(self._matrix)
def get_symmetry(self):
"""Returns the type of `Symmetry` of the Lattice."""
return self._symmetry
@staticmethod
def symmetry(crystal_structure=Symmetry.cubic, use_miller_bravais=False):
"""Define the equivalent crystal symmetries.
Those come from Randle & Engler, 2000. For instance, in the cubic
crystal structure there are 24 equivalent cube orientations.
:param crystal_structure: an instance of the `Symmetry` class describing the crystal symmetry.
:raise ValueError: if the given symmetry is not supported.
:returns array: A numpy array of shape (n, 3, 3) where n is the \
number of symmetries of the given crystal structure.
"""
return crystal_structure.symmetry_operators(use_miller_bravais=use_miller_bravais)
def get_lattice_parameters(self):
"""This function create a list of the independent lattice parameters depending on the symmetry.
:return: a list of the lattice parameters.
"""
sym = self.get_symmetry()
(a, b, c) = self._lengths
(alpha, beta, gamma) = self._angles
# craft a list of the lattice parameters
if sym is Symmetry.cubic:
parameters = [a]
elif sym in [Symmetry.hexagonal, Symmetry.trigonal, Symmetry.tetragonal]:
parameters = [a, c]
elif sym is Symmetry.orthorhombic:
parameters = [a, b, c]
elif sym is Symmetry.monoclinic:
parameters = [a, b, c, alpha]
else:
parameters = [a, b, c, alpha, beta, gamma]
return parameters
def guess_symmetry(self):
"""Guess the lattice symmetry from the geometry."""
(a, b, c) = self._lengths
(alpha, beta, gamma) = self._angles
return Lattice.guess_symmetry_from_parameters(a, b, c, alpha, beta, gamma)
@staticmethod
def guess_symmetry_from_parameters(a, b, c, alpha, beta, gamma):
"""Guess the lattice symmetry from the geometrical parameters."""
if alpha == 90. and beta == 90. and gamma == 90:
if a == b and a == c:
return Symmetry.cubic
elif a == b and a != c:
return Symmetry.tetragonal
else:
return Symmetry.orthorhombic
elif alpha == 90. and beta == 90. and gamma == 120 and a == b and a != c:
return Symmetry.hexagonal
elif a == b and a == c and alpha == beta and alpha == gamma:
return Symmetry.trigonal
elif a != b and a != c and beta == gamma and alpha != beta:
return Symmetry.monoclinic
else:
return Symmetry.triclinic
@staticmethod
def from_cif(file_path):
"""
Create a crystal Lattice using information contained in a given CIF
file (Crystallographic Information Framework, a standard for
information interchange in crystallography).
Reference: <NAME>, <NAME> and <NAME>,
The crystallographic information file (CIF): a new standard archive file for crystallography,
Acta Crystallographica Section A, 47(6):655-685 (1991)
doi = 10.1107/S010876739101067X
.. note::
Lattice constants are given in Angstrom in CIF files and so
converted to nanometer.
:param str file_path: The path to the CIF file representing the crystal structure.
:returns: A `Lattice` instance corresponding to the given CIF file.
"""
cf = CifFile.ReadCif(file_path)
# crystal = eval('cf[\'%s\']' % symbol)
crystal = cf.first_block()
a = 0.1 * float(crystal['_cell_length_a'])
b = 0.1 * float(crystal['_cell_length_b'])
c = 0.1 * float(crystal['_cell_length_c'])
alpha = float(crystal['_cell_angle_alpha'])
beta = float(crystal['_cell_angle_beta'])
gamma = float(crystal['_cell_angle_gamma'])
try:
symmetry = Symmetry.from_string(crystal['_symmetry_cell_setting'])
except KeyError:
symmetry = Lattice.guess_symmetry_from_parameters(a, b, c, alpha, beta, gamma)
return Lattice.from_parameters(a, b, c, alpha, beta, gamma, symmetry=symmetry)
@staticmethod
def from_symbol(symbol):
'''
Create a crystal Lattice using information contained in a unit cell.
*Parameters*
**symbol**: The chemical symbol of the crystal (eg 'Al')
*Returns*
A `Lattice` instance corresponding to the given element.
'''
path = os.path.dirname(__file__)
return Lattice.from_cif(os.path.join(path, 'cif', '%s.cif' % symbol))
@staticmethod
def cubic(a):
'''
Create a cubic Lattice unit cell.
*Parameters*
**a**: first lattice length parameter (a = b = c here)
*Returns*
A `Lattice` instance corresponding to a primitice cubic lattice.
'''
return Lattice([[a, 0.0, 0.0], [0.0, a, 0.0], [0.0, 0.0, a]], symmetry=Symmetry.cubic)
@staticmethod
def body_centered_cubic(a):
'''
Create a body centered cubic Lattice unit cell.
*Parameters*
**a**: first lattice length parameter (a = b = c here)
*Returns*
A `Lattice` instance corresponding to a body centered cubic
lattice.
'''
return Lattice.from_parameters(a, a, a, 90, 90, 90, centering='I', symmetry=Symmetry.cubic)
@staticmethod
def face_centered_cubic(a):
'''
Create a face centered cubic Lattice unit cell.
*Parameters*
**a**: first lattice length parameter (a = b = c here)
*Returns*
A `Lattice` instance corresponding to a face centered cubic
lattice.
'''
return Lattice.from_parameters(a, a, a, 90, 90, 90, centering='F', symmetry=Symmetry.cubic)
@staticmethod
def tetragonal(a, c):
'''
Create a tetragonal Lattice unit cell.
*Parameters*
**a**: first lattice length parameter
**c**: third lattice length parameter (b = a here)
*Returns*
A `Lattice` instance corresponding to a primitive tetragonal
lattice.
'''
return Lattice.from_parameters(a, a, c, 90, 90, 90, symmetry=Symmetry.tetragonal)
@staticmethod
def body_centered_tetragonal(a, c):
'''
Create a body centered tetragonal Lattice unit cell.
*Parameters*
**a**: first lattice length parameter
**c**: third lattice length parameter (b = a here)
*Returns*
A `Lattice` instance corresponding to a body centered tetragonal
lattice.
'''
return Lattice.from_parameters(a, a, c, 90, 90, 90, centering='I', symmetry=Symmetry.tetragonal)
@staticmethod
def orthorhombic(a, b, c):
'''
Create an orthorhombic Lattice unit cell with 3 different length
parameters a, b and c.
'''
return Lattice.from_parameters(a, b, c, 90, 90, 90, symmetry=Symmetry.orthorhombic)
@staticmethod
def base_centered_orthorhombic(a, b, c):
'''
Create a base centered orthorhombic Lattice unit cell.
*Parameters*
**a**: first lattice length parameter
**b**: second lattice length parameter
**c**: third lattice length parameter
*Returns*
A `Lattice` instance corresponding to a base centered orthorhombic
lattice.
'''
return Lattice.from_parameters(a, b, c, 90, 90, 90, centering='C', symmetry=Symmetry.orthorhombic)
@staticmethod
def body_centered_orthorhombic(a, b, c):
'''
Create a body centered orthorhombic Lattice unit cell.
*Parameters*
**a**: first lattice length parameter
**b**: second lattice length parameter
**c**: third lattice length parameter
*Returns*
A `Lattice` instance corresponding to a body centered orthorhombic
lattice.
'''
return Lattice.from_parameters(a, b, c, 90, 90, 90, centering='I', symmetry=Symmetry.orthorhombic)
@staticmethod
def face_centered_orthorhombic(a, b, c):
'''
Create a face centered orthorhombic Lattice unit cell.
*Parameters*
**a**: first lattice length parameter
**b**: second lattice length parameter
**c**: third lattice length parameter
*Returns*
A `Lattice` instance corresponding to a face centered orthorhombic
lattice.
'''
return Lattice.from_parameters(a, b, c, 90, 90, 90, centering='F', symmetry=Symmetry.orthorhombic)
@staticmethod
def hexagonal(a, c):
'''
Create a hexagonal Lattice unit cell with length parameters a and c.
'''
return Lattice.from_parameters(a, a, c, 90, 90, 120, symmetry=Symmetry.hexagonal)
@staticmethod
def rhombohedral(a, alpha):
'''
Create a rhombohedral Lattice unit cell with one length
parameter a and the angle alpha.
'''
return Lattice.from_parameters(a, a, a, alpha, alpha, alpha, symmetry=Symmetry.trigonal)
@staticmethod
def monoclinic(a, b, c, alpha):
'''
Create a monoclinic Lattice unit cell with 3 different length
parameters a, b and c. The cell angle is given by alpha.
The lattice centering is primitive, i.e. 'P'.
'''
return Lattice.from_parameters(a, b, c, alpha, 90, 90, symmetry=Symmetry.monoclinic)
@staticmethod
def base_centered_monoclinic(a, b, c, alpha):
'''
Create a base centered monoclinic Lattice unit cell.
*Parameters*
**a**: first lattice length parameter
**b**: second lattice length parameter
**c**: third lattice length parameter
**alpha**: first lattice angle parameter
*Returns*
A `Lattice` instance corresponding to a base centered monoclinic
lattice.
'''
return Lattice.from_parameters(a, b, c, alpha, 90, 90, centering='C', symmetry=Symmetry.monoclinic)
@staticmethod
def triclinic(a, b, c, alpha, beta, gamma):
'''
Create a triclinic Lattice unit cell with 3 different length
parameters a, b, c and three different cell angles alpha, beta
and gamma.
.. note::
This method is here for the sake of completeness since one can
create the triclinic cell directly using the `from_parameters`
method.
'''
return Lattice.from_parameters(a, b, c, alpha, beta, gamma, symmetry=Symmetry.triclinic)
@staticmethod
def from_symmetry(symmetry, parameters):
"""Create a new lattice based on a type of symmetry and a list of lattice parameters.
The type of symmetry should be an instance of `Symmetry` and the list of parameters should contain the
appropriate number: 1 for cubic, 2 for hexagonal, tetragonal or trigonal, 3 for orthorhombic, 4 for monoclinic
or 6 for triclinic.
:param symmetry: an instance of `Symmetry`.
:param list parameters: a list of the lattice parameters.
:return: the newly created `Lattice` instance.
"""
if symmetry is Symmetry.cubic:
if len(parameters) != 1:
raise(ValueError('The number of parameters for %s symmetry should be 1, got %d' % (symmetry, len(parameters))))
return Lattice.cubic(parameters[0])
elif symmetry in [Symmetry.hexagonal, Symmetry.trigonal]:
if len(parameters) != 2:
raise(ValueError('The number of parameters for %s symmetry should be 2, got %d' % (symmetry, len(parameters))))
return Lattice.hexagonal(parameters[0], parameters[1])
elif symmetry is Symmetry.orthorhombic:
if len(parameters) != 3:
raise(ValueError('The number of parameters for %s symmetry should be 3, got %d' % (symmetry, len(parameters))))
return Lattice.orthorhombic(parameters[0], parameters[1], parameters[2])
elif symmetry is Symmetry.tetragonal:
if len(parameters) != 2:
raise(ValueError('The number of parameters for %s symmetry should be 2, got %d' % (symmetry, len(parameters))))
return Lattice.tetragonal(parameters[0], parameters[1])
elif symmetry is Symmetry.monoclinic:
if len(parameters) != 4:
raise(ValueError('The number of parameters for %s symmetry should be 4, got %d' % (symmetry, len(parameters))))
return Lattice.monoclinic(parameters[0], parameters[1], parameters[2], parameters[3])
else:
if len(parameters) != 6:
raise(ValueError('The number of parameters for triclinic symmetry should be 6, got %d' % len(parameters)))
return Lattice.triclinic(*parameters)
@staticmethod
def from_parameters(a, b, c, alpha, beta, gamma, x_aligned_with_a=True, centering='P', symmetry=Symmetry.triclinic):
"""
Create a Lattice using unit cell lengths and angles (in degrees).
The lattice centering can also be specified (among 'P', 'I', 'F',
'A', 'B' or 'C').
:param float a: first lattice length parameter.
:param float b: second lattice length parameter.
:param float c: third lattice length parameter.
:param float alpha: first lattice angle parameter.
:param float beta: second lattice angle parameter.
:param float gamma: third lattice angle parameter.
:param bool x_aligned_with_a: flag to control the convention used to define the Cartesian frame.
:param str centering: lattice centering ('P' by default) passed to the `Lattice` class.
:param symmetry: a `Symmetry` instance to be passed to the lattice.
:return: A `Lattice` instance with the specified lattice parameters and centering.
"""
alpha_r = radians(alpha)
beta_r = radians(beta)
gamma_r = radians(gamma)
if x_aligned_with_a: # first lattice vector (a) is aligned with X
vector_a = a * np.array([1, 0, 0])
vector_b = b * np.array([np.cos(gamma_r), np.sin(gamma_r), 0])
c1 = c * np.cos(beta_r)
c2 = c * (np.cos(alpha_r) - np.cos(gamma_r) * np.cos(beta_r)) / np.sin(gamma_r)
vector_c = np.array([c1, c2, np.sqrt(c ** 2 - c1 ** 2 - c2 ** 2)])
else: # third lattice vector (c) is aligned with Z
cos_gamma_star = (np.cos(alpha_r) * np.cos(beta_r) - np.cos(gamma_r)) / (np.sin(alpha_r) * np.sin(beta_r))
sin_gamma_star = np.sqrt(1 - cos_gamma_star ** 2)
vector_a = [a * np.sin(beta_r), 0.0, a * np.cos(beta_r)]
vector_b = [-b * np.sin(alpha_r) * cos_gamma_star, b * np.sin(alpha_r) * sin_gamma_star, b * np.cos(alpha_r)]
vector_c = [0.0, 0.0, float(c)]
return Lattice([vector_a, vector_b, vector_c], centering=centering, symmetry=symmetry)
from convergence import Convergence
from numpy import sqrt, repeat, tile, hstack, array, zeros, ones, sqrt, diag, asarray, hstack, vstack, split, cumsum
from numpy.random import randn
from copy import copy
from numpy.linalg import svd
import cvxpy as cp
# XXX does not support splitting over samples yet (only over features to
# accommodate arbitrary losses by column).
class GLRM(object):
def __init__(self, A, loss, regX, regY, k, missing_list = None, converge = None, scale=True):
self.scale = scale
# Turn everything in to lists / convert to correct dimensions
if not isinstance(A, list): A = [A]
if not isinstance(loss, list): loss = [loss]
if not isinstance(regY, list): regY = [regY]
if len(regY) == 1 and len(regY) < len(loss):
regY = [copy(regY[0]) for _ in range(len(loss))]
if missing_list and not isinstance(missing_list[0], list): missing_list = [missing_list]
loss = [L(Aj) for Aj, L in zip(A, loss)]
# save necessary info
self.A, self.k, self.L = A, k, loss
if converge == None: self.converge = Convergence()
else: self.converge = converge
# initialize cvxpy problems
self._initialize_probs(A, k, missing_list, regX, regY)
def factors(self):
# return X, Y as matrices (not lists of sub matrices)
return self.X, hstack(self.Y)
def convergence(self):
# convergence information for alternating minimization algorithm
return self.converge
def predict(self):
# return decode(XY), low-rank approximation of A
return hstack([L.decode(self.X.dot(yj)) for Aj, yj, L in zip(self.A, self.Y, self.L)])
def fit(self, max_iters=100, eps=1e-2, use_indirect=False, warm_start=False):
Xv, Yp, pX = self.probX
Xp, Yv, pY = self.probY
self.converge.reset()
# alternating minimization
while not self.converge.d():
objX = pX.solve(solver=cp.SCS, eps=eps, max_iters=max_iters,
use_indirect=use_indirect, warm_start=warm_start)
Xp.value[:,:-1] = copy(Xv.value)
# can parallelize this
for ypj, yvj, pyj in zip(Yp, Yv, pY):
objY = pyj.solve(solver=cp.SCS, eps=eps, max_iters=max_iters,
use_indirect=use_indirect, warm_start=warm_start)
ypj.value = copy(yvj.value)
self.converge.obj.append(objX)
self._finalize_XY(Xv, Yv)
return self.X, self.Y
def _initialize_probs(self, A, k, missing_list, regX, regY):
# useful parameters
m = A[0].shape[0]
ns = [a.shape[1] for a in A]
if missing_list == None: missing_list = [[]]*len(self.L)
# initialize A, X, Y
B = self._initialize_A(A, missing_list)
X0, Y0 = self._initialize_XY(B, k, missing_list)
self.X0, self.Y0 = X0, Y0
# cvxpy problems
Xv, Yp = cp.Variable(m,k), [cp.Parameter(k+1,ni) for ni in ns]
Xp, Yv = cp.Parameter(m,k+1), [cp.Variable(k+1,ni) for ni in ns]
Xp.value = copy(X0)
for yj, yj0 in zip(Yp, Y0): yj.value = copy(yj0)
onesM = cp.Constant(ones((m,1)))
obj = sum(L(Aj, cp.mul_elemwise(mask, Xv*yj[:-1,:] \
+ onesM*yj[-1:,:]) + offset) + ry(yj[:-1,:])\
for L, Aj, yj, mask, offset, ry in \
zip(self.L, A, Yp, self.masks, self.offsets, regY)) + regX(Xv)
pX = cp.Problem(cp.Minimize(obj))
pY = [cp.Problem(cp.Minimize(\
L(Aj, cp.mul_elemwise(mask, Xp*yj) + offset) \
+ ry(yj[:-1,:]) + regX(Xp))) \
for L, Aj, yj, mask, offset, ry in zip(self.L, A, Yv, self.masks, self.offsets, regY)]
self.probX = (Xv, Yp, pX)
self.probY = (Xp, Yv, pY)
def _initialize_A(self, A, missing_list):
""" Subtract out means of non-missing, standardize by std. """
m = A[0].shape[0]
ns = [a.shape[1] for a in A]
mean, stdev = [zeros(ni) for ni in ns], [zeros(ni) for ni in ns]
B, masks, offsets = [], [], []
# compute stdev for entries that are not missing
for ni, sv, mu, ai, missing, L in zip(ns, stdev, mean, A, missing_list, self.L):
# collect non-missing terms
for j in range(ni):
elems = array([ai[i,j] for i in range(m) if (i,j) not in missing])
alpha = cp.Variable()
# calculate standardized energy per column
sv[j] = cp.Problem(cp.Minimize(\
L(elems, alpha*ones(elems.shape)))).solve()/len(elems)
mu[j] = alpha.value
offset, mask = tile(mu, (m,1)), tile(sv, (m,1))
mask[mask == 0] = 1
bi = (ai-offset)/mask # standardize
# zero-out missing entries (for XY initialization)
for (i,j) in missing: bi[i,j], mask[i,j] = 0, 0
B.append(bi) # save
masks.append(mask)
offsets.append(offset)
self.masks = masks
self.offsets = offsets
return B
def _initialize_XY(self, B, k, missing_list):
""" Scale by ration of non-missing, SVD, append col of ones, add noise. """
A = hstack(bi for bi in B)
m, n = A.shape
# normalize entries that are missing
if self.scale: stdev = A.std(0)
else: stdev = ones(n)
mu = A.mean(0)
C = sqrt(1e-2/k) # XXX may need to be adjusted for larger problems
A = (A-mu)/stdev + C*randn(m,n)
# SVD to get initial point
u, s, v = svd(A, full_matrices = False)
u, s, v = u[:,:k], diag(sqrt(s[:k])), v[:k,:]
X0, Y0 = asarray(u.dot(s)), asarray(s.dot(v))*asarray(stdev)
# append col of ones to X, row of column means to Y
X0 = hstack((X0, ones((m,1)))) + C*randn(m,k+1)
Y0 = vstack((Y0, mu)) + C*randn(k+1,n)
return X0, Y0
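# Conceptual sketch (plain numpy, quadratic loss, no regularization) of the alternating
# minimization that GLRM.fit performs with cvxpy above; this is not the GLRM API itself.
if __name__ == '__main__':
    import numpy as np
    rng = np.random.RandomState(0)
    A_demo = rng.randn(20, 8)
    k_demo = 3
    X_demo = rng.randn(20, k_demo)
    Y_demo = rng.randn(k_demo, 8)
    for _ in range(50):
        # fix Y and solve least squares for X, then fix X and solve for Y
        X_demo = np.linalg.lstsq(Y_demo.T, A_demo.T, rcond=None)[0].T
        Y_demo = np.linalg.lstsq(X_demo, A_demo, rcond=None)[0]
    print('rank-%d residual: %.4f' % (k_demo, np.linalg.norm(A_demo - X_demo.dot(Y_demo))))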