import torch
import os
import copy
import numpy as np
from pyquaternion import Quaternion
from utils.data_classes import PointCloud
from utils.metrics import estimateOverlap
from config import cfg
from scipy.optimize import leastsq
def distanceBB_Gaussian(box1, box2, sigma=1):
off1 = np.array([
box1.center[0], box1.center[2],
Quaternion(matrix=box1.rotation_matrix).degrees
])
off2 = np.array([
box2.center[0], box2.center[2],
Quaternion(matrix=box2.rotation_matrix).degrees
])
dist = np.linalg.norm(off1 - off2)
score = np.exp(-0.5 * (dist) / (sigma * sigma))
return score
# IoU or Gaussian score map
def getScoreGaussian(offset, sigma=1):
coeffs = [1, 1, 1 / 5]
dist = np.linalg.norm(np.multiply(offset, coeffs))
score = np.exp(-0.5 * (dist) / (sigma * sigma))
return torch.tensor([score])
def getScoreIoU(a, b):
score = estimateOverlap(a, b)
return torch.tensor([score])
def getScoreHingeIoU(a, b):
score = estimateOverlap(a, b)
if score < 0.5:
score = 0.0
return torch.tensor([score])
def getOffsetBB(box, offset):
rot_quat = Quaternion(matrix=box.rotation_matrix)
trans = np.array(box.center)
new_box = copy.deepcopy(box)
new_box.translate(-trans)
new_box.rotate(rot_quat.inverse)
# REMOVE PREVIOUS TRANSFORMATION
if len(offset) == 3:
new_box.rotate(
Quaternion(axis=[0, 0, 1], angle=offset[2] * np.pi / 180))
elif len(offset) == 4:
new_box.rotate(
Quaternion(axis=[0, 0, 1], angle=offset[3] * np.pi / 180))
if offset[0]>new_box.wlh[0]:
offset[0] = np.random.uniform(-1,1)
if offset[1]>min(new_box.wlh[1],2):
offset[1] = np.random.uniform(-1,1)
new_box.translate(np.array([offset[0], offset[1], 0]))
# APPLY PREVIOUS TRANSFORMATION
new_box.rotate(rot_quat)
new_box.translate(trans)
return new_box
def voxelize(PC, dim_size=[48, 108, 48]):
# PC = normalizePC(PC)
if np.isscalar(dim_size):
dim_size = [dim_size] * 3
dim_size = np.atleast_2d(dim_size).T
PC = (PC + 0.5) * dim_size
# truncate to integers
xyz = PC.astype(int)
# discard voxels that fall outside dims
valid_ix = ~np.any((xyz < 0) | (xyz >= dim_size), 0)
xyz = xyz[:, valid_ix]
out = np.zeros(dim_size.flatten(), dtype=np.float32)
out[tuple(xyz)] = 1
# print(out)
return out
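# Hedged usage sketch (illustrative only; `_voxelize_example` is not part of the
# original code). It assumes the convention implied by `(PC + 0.5) * dim_size`:
# points are shaped (3, N) and already normalized to roughly [-0.5, 0.5] per axis.
def _voxelize_example():
    pts = np.random.uniform(-0.5, 0.5, size=(3, 200)).astype(np.float32)
    grid = voxelize(pts, dim_size=[48, 108, 48])
    # grid is a (48, 108, 48) occupancy volume with ones at occupied voxels
    return grid.shape, int(grid.sum())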
# def regularizePC2(input_size, PC,):
# return regularizePC(PC=PC, input_size=input_size)
def regularizePC_template(PC,input_size,istrain=True):
PC = np.array(PC.points, dtype=np.float32)
if np.shape(PC)[1] > 0:
if PC.shape[0] > 3:
PC = PC[0:3, :]
if PC.shape[1] != int(input_size/2):
if not istrain:
np.random.seed(1)
new_pts_idx = np.random.randint(
low=0, high=PC.shape[1], size=int(input_size/2), dtype=np.int64)
PC = PC[:, new_pts_idx]
PC = PC.reshape((3, int(input_size/2))).T
else:
PC = np.zeros((3, int(input_size/2))).T
return PC
def regularizePC_scene(PC,input_size,istrain=True):
PC = np.array(PC.points, dtype=np.float32)
if np.shape(PC)[1] > 0:
if PC.shape[0] > 3:
PC = PC[0:3, :]
if PC.shape[1] != input_size:
if not istrain:
np.random.seed(1)
new_pts_idx = np.random.randint(
low=0, high=PC.shape[1], size=input_size, dtype=np.int64)
PC = PC[:, new_pts_idx]
PC = PC.reshape((3, input_size)).T
else:
PC = np.zeros((3, input_size)).T
return PC
def getModel(PCs, boxes, offset=0, scale=1.0, normalize=False):
if len(PCs) == 0:
return PointCloud(np.ones((3, 0)))
points = np.ones((PCs[0].points.shape[0], 0))
for PC, box in zip(PCs, boxes):
cropped_PC = cropAndCenterPC(
PC, box, offset=offset, scale=scale, normalize=normalize)
# try:
if cropped_PC.points.shape[1] > 0:
points = np.concatenate([points, cropped_PC.points], axis=1)
PC = PointCloud(points)
return PC
def cropPC(PC, box, offset=0, scale=1.0):
box_tmp = copy.deepcopy(box)
box_tmp.wlh = box_tmp.wlh * scale
maxi = np.max(box_tmp.corners(), 1) + offset
mini = np.min(box_tmp.corners(), 1) - offset
x_filt_max = PC.points[0, :] < maxi[0]
x_filt_min = PC.points[0, :] > mini[0]
y_filt_max = PC.points[1, :] < maxi[1]
y_filt_min = PC.points[1, :] > mini[1]
z_filt_max = PC.points[2, :] < maxi[2]
z_filt_min = PC.points[2, :] > mini[2]
close = np.logical_and(x_filt_min, x_filt_max)
close = np.logical_and(close, y_filt_min)
close = np.logical_and(close, y_filt_max)
close = np.logical_and(close, z_filt_min)
close = np.logical_and(close, z_filt_max)
new_PC = PointCloud(PC.points[:, close])
return new_PC
def getlabelPC(PC, box, offset=0, scale=1.0):
box_tmp = copy.deepcopy(box)
new_PC = PointCloud(PC.points.copy())
rot_mat = np.transpose(box_tmp.rotation_matrix)
trans = -box_tmp.center
# align data
new_PC.translate(trans)
box_tmp.translate(trans)
new_PC.rotate((rot_mat))
box_tmp.rotate(Quaternion(matrix=(rot_mat)))
box_tmp.wlh = box_tmp.wlh * scale
maxi = np.max(box_tmp.corners(), 1) + offset
mini = np.min(box_tmp.corners(), 1) - offset
x_filt_max = new_PC.points[0, :] < maxi[0]
x_filt_min = new_PC.points[0, :] > mini[0]
y_filt_max = new_PC.points[1, :] < maxi[1]
y_filt_min = new_PC.points[1, :] > mini[1]
z_filt_max = new_PC.points[2, :] < maxi[2]
z_filt_min = new_PC.points[2, :] > mini[2]
close = np.logical_and(x_filt_min, x_filt_max)
close = np.logical_and(close, y_filt_min)
close = np.logical_and(close, y_filt_max)
close = np.logical_and(close, z_filt_min)
close = np.logical_and(close, z_filt_max)
new_label = np.zeros(new_PC.points.shape[1])
new_label[close] = 1
return new_label
def cropPCwithlabel(PC, box, label, offset=0, scale=1.0):
box_tmp = copy.deepcopy(box)
box_tmp.wlh = box_tmp.wlh * scale
maxi = np.max(box_tmp.corners(), 1) + offset
mini = np.min(box_tmp.corners(), 1) - offset
x_filt_max = PC.points[0, :] < maxi[0]
x_filt_min = PC.points[0, :] > mini[0]
y_filt_max = PC.points[1, :] < maxi[1]
y_filt_min = PC.points[1, :] > mini[1]
z_filt_max = PC.points[2, :] < maxi[2]
z_filt_min = PC.points[2, :] > mini[2]
close = np.logical_and(x_filt_min, x_filt_max)
close = np.logical_and(close, y_filt_min)
close = np.logical_and(close, y_filt_max)
close = np.logical_and(close, z_filt_min)
close = np.logical_and(close, z_filt_max)
new_PC = PointCloud(PC.points[:, close])
new_label = label[close]
return new_PC,new_label
def weight_process(include,low,high):
if include<low:
weight = 0.7
elif include >high:
weight = 1
else:
weight = (include*2.0+3.0*high-5.0*low)/(5*(high-low))
return weight
def func(a, x):
k, b = a
return k * x + b
def dist(a, x, y):
return func(a, x) - y
def weight_process2(k):
k = abs(k)
if k>1:
weight = 0.7
else:
weight = 1-0.3*k
return weight
def cropAndCenterPC(PC, box, offset=0, scale=1.0, normalize=False):
new_PC = cropPC(PC, box, offset=2 * offset, scale=4 * scale)
new_box = copy.deepcopy(box)
rot_mat = np.transpose(new_box.rotation_matrix)
trans = -new_box.center
# align data
new_PC.translate(trans)
new_box.translate(trans)
new_PC.rotate((rot_mat))
new_box.rotate(Quaternion(matrix=(rot_mat)))
# crop around box
new_PC = cropPC(new_PC, new_box, offset=offset, scale=scale)
if normalize:
new_PC.normalize(box.wlh)
return new_PC
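# Note: cropAndCenterPC first applies a generous axis-aligned crop (2*offset,
# 4*scale) to cut the point count cheaply, then expresses the remaining cloud in
# the box's local frame (translate by -center, rotate by the transposed rotation),
# and finally applies a tight crop around the now axis-aligned box.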
def Centerbox(sample_box, gt_box):
rot_mat = np.transpose(gt_box.rotation_matrix)
trans = -gt_box.center
new_box = copy.deepcopy(sample_box)
new_box.translate(trans)
new_box.rotate(Quaternion(matrix=(rot_mat)))
return new_box
def cropAndCenterPC_new(PC, sample_box, gt_box, offset=0, scale=1.0, normalize=False):
new_PC = cropPC(PC, sample_box, offset=2 * offset, scale=4 * scale)
new_box = copy.deepcopy(sample_box)
new_label = getlabelPC(new_PC, gt_box, offset=offset, scale=scale)
new_box_gt = copy.deepcopy(gt_box)
# new_box_gt2 = copy.deepcopy(gt_box)
#rot_quat = Quaternion(matrix=new_box.rotation_matrix)
rot_mat = np.transpose(new_box.rotation_matrix)
trans = -new_box.center
# align data
new_PC.translate(trans)
new_box.translate(trans)
new_PC.rotate((rot_mat))
new_box.rotate(Quaternion(matrix=(rot_mat)))
new_box_gt.translate(trans)
new_box_gt.rotate(Quaternion(matrix=(rot_mat)))
# crop around box
new_PC, new_label = cropPCwithlabel(new_PC, new_box, new_label, offset=offset+cfg.SEARCH_AREA, scale=1 * scale)
#new_PC, new_label = cropPCwithlabel(new_PC, new_box, new_label, offset=offset+0.6, scale=1 * scale)
center_box_gt = [new_box_gt.center[0],new_box_gt.center[1],new_box_gt.center[2],new_box_gt.wlh[0],new_box_gt.wlh[1],new_box_gt.wlh[2],new_box_gt.orientation.axis[2] * new_box_gt.orientation.radians]
center_box_gt = np.array(center_box_gt)
# label_reg = np.tile(label_reg,[np.size(new_label),1])
if normalize:
new_PC.normalize(sample_box.wlh)
return new_PC, center_box_gt, trans, rot_mat
# def cropAndCenterPC_label_test_time(PC, sample_box, offset=0, scale=1.0):
#
# new_PC = cropPC(PC, sample_box, offset=2 * offset, scale=4 * scale)
#
# new_box = copy.deepcopy(sample_box)
#
# rot_quat = Quaternion(matrix=new_box.rotation_matrix)
# rot_mat = np.transpose(new_box.rotation_matrix)
# trans = -new_box.center
#
# # align data
# new_PC.translate(trans)
# new_box.translate(trans)
# new_PC.rotate((rot_mat))
# new_box.rotate(Quaternion(matrix=(rot_mat)))
#
# # crop around box
# new_PC = cropPC(new_PC, new_box, offset=offset+2.0, scale=scale)
#
# return new_PC
# def cropAndCenterPC_test(PC, sample_box, gt_box, offset=0, scale=1.0, normalize=False):
#
# new_PC = cropPC(PC, sample_box, offset=2 * offset, scale=4 * scale)
#
# new_box = copy.deepcopy(sample_box)
#
# new_label = getlabelPC(new_PC, gt_box, offset=offset, scale=scale)
# new_box_gt = copy.deepcopy(gt_box)
# # new_box_gt2 = copy.deepcopy(gt_box)
#
# rot_quat = Quaternion(matrix=new_box.rotation_matrix)
# rot_mat = np.transpose(new_box.rotation_matrix)
# trans = -new_box.center
#
# # align data
# new_PC.translate(trans)
# new_box.translate(trans)
# new_PC.rotate((rot_mat))
# new_box.rotate(Quaternion(matrix=(rot_mat)))
#
# new_box_gt.translate(trans)
# new_box_gt.rotate(Quaternion(matrix=(rot_mat)))
# # new_box_gt2.translate(trans)
# # new_box_gt2.rotate(rot_quat.inverse)
#
# # crop around box
# new_PC, new_label = cropPCwithlabel(new_PC, new_box, new_label, offset=offset+cfg.SEARCH_AREA, scale=1 * scale)
# #new_PC, new_label = cropPCwithlabel(new_PC, new_box, new_label, offset=offset+0.6, scale=1 * scale)
#
# label_reg = [new_box_gt.center[0],new_box_gt.center[1],new_box_gt.center[2]]
# label_reg = np.array(label_reg)
# # label_reg = (new_PC.points - np.tile(new_box_gt.center,[np.size(new_label),1]).T) * np.expand_dims(new_label, axis=0)
# # newoff = [new_box_gt.center[0],new_box_gt.center[1],new_box_gt.center[2]]
# # newoff = np.array(newoff)
#
# if normalize:
# new_PC.normalize(sample_box.wlh)
# return new_PC, trans, rot_mat
def getPJMatrix(calib):
R = np.zeros([4, 4], dtype=np.float32)
R[:3, :3] = calib['R_rect']
R[3, 3] = 1
M = np.dot(calib['P2:'], R)
return M
def project_velo2rgb(box, pj_matrix):
box = box.corners().T
box3d = np.ones([8, 4], dtype=np.float32)
box3d[:, :3] = box
box2d = np.dot(pj_matrix, box3d.T)
box2d = box2d[:2, :].T/box2d[2, :].reshape(8, 1)
projections = box2d
minx = 0 if np.min(projections[:, 0]) < 0 else int(np.min(projections[:, 0]))
maxx = 0 if np.max(projections[:, 0]) < 0 else int(np.max(projections[:, 0]))
#!/usr/bin/env python
import gym
from gym_line_follower.envs import LineFollowerEnv
import numpy as np
import time
# PID gain
P=0.13
I=0.002
D=0.00001
# Sample time
Ts = 1/200
# as https://www.scilab.org/discrete-time-pid-controller-implementation trapezoidal form
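# Hedged derivation sketch (not part of the original source): applying the
# trapezoidal (Tustin) substitution s -> (2/Ts)*(z-1)/(z+1) to C(s) = P + I/s + D*s
# gives the recurrence
#     u[k] = u[k-2] + b0*e[k] + b1*e[k-1] + b2*e[k-2]
# with b0 = P + I*Ts/2 + 2*D/Ts, b1 = I*Ts - 4*D/Ts, b2 = -P + I*Ts/2 + 2*D/Ts,
# which matches the b0/b1/b2 coefficients computed in PidControl.__init__ below.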
class PidControl:
"""docstring for PidControl"""
def __init__(self, P,I,D, Ts, sensorLen, vB=0.4, thresh=0.5):
self.P = P
self.I = I
self.D = D
# Aux constants
self.b0 = (2*Ts*P + I*Ts*Ts + 4*D)/(2*Ts)
self.b1 = (2*I*Ts*Ts - 8*D)/(2*Ts)
self.b2 = (I*Ts*Ts - 2*Ts*P + 4*D)/(2*Ts)
self.u1 = 0
self.u2 = 0
self.e1 = 0
self.e2 = 0
# motor
self.vB = vB
# sensor
self.thresh = thresh
self.sMax = sensorLen/2
self.sensorPos = np.arange(sensorLen) - (sensorLen-1)/2
def linePos(self, sensor):
# Check if sensors are out of the line
if np.all(sensor < self.thresh):
import numpy as np
import math
'''
corner bit indices
1------0
| |
| |
3------2
'''
g_rules = [
[], # 0000
['UR'], # 0001
['LU'], # 0010
['LR'], # 0011
['RD'], # 0100
['UD'], # 0101
['RD', 'LU'], # 0110
['LD'], # 0111
['DL'], # 1000
['DL', 'UR'], # 1001
['DU'], # 1010
['DR'], # 1011
['RL'], # 1100
['UL'], # 1101
['RU'], # 1110
[], # 1111
]
g_offsets = {
'L': np.array([-1, 0]),
'R': np.array([1, 0]),
'U': np.array([0, 1]),
'D': np.array([0, -1]),
}
def march(A, cell_size):
'''
Input:
0/1 matrix A, cell size, one grid corresponding to one entry in the matrix,
assuming x-axis corresponds to rows in A.
Output:
a list of (u, v) where u,v are 2d points, representing an edge
'''
results = []
n, m = A.shape
for i in range(n-1):
for j in range(m-1):
c = np.array([(i+1) * cell_size, (j+1) * cell_size]) # center coordinate
mask = (A[i,j] << 3) | (A[i+1, j] << 2) | (A[i, j+1] << 1) | A[i+1, j+1]
rule = g_rules[mask]
for r in rule:
u = c + g_offsets[r[0]] * cell_size / 2
v = c + g_offsets[r[1]] * cell_size / 2
results.append((u,v))
return results
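# Hedged usage sketch (illustrative only; `_march_example` is not part of the
# original code). A 2x2 matrix forms a single marching-squares cell; with only
# the corner at index (1, 1) set, rule 0001 ('UR') yields one boundary segment.
def _march_example():
    A = np.array([[0, 0],
                  [0, 1]])
    segments = march(A, cell_size=1.0)
    # one (u, v) pair: points half a cell above and to the right of the cell center
    return segments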
def super_resolution(img):
# output a image with dimensions doubled
n, m = img.shape
res = np.zeros(shape=[n * 2, m * 2])
def get_loc_old(i, j):
return np.array([i + 0.5, j + 0.5])
def calc_weight(oi, oj, loc):
oloc = get_loc_old(oi, oj)
d_sqr = np.dot(oloc - loc, oloc - loc)
# -*- coding: utf-8 -*-
"""Tests and Confidence Intervals for Binomial Proportions
Created on Fri Mar 01 00:23:07 2013
Author: <NAME>
License: BSD-3
"""
from statsmodels.compat.python import lzip
import numpy as np
from scipy import stats, optimize
from sys import float_info
from statsmodels.stats.base import AllPairsResults
from statsmodels.tools.sm_exceptions import HypothesisTestWarning
from statsmodels.stats.weightstats import _zstat_generic2
from statsmodels.stats.base import HolderTuple
from statsmodels.tools.testing import Holder
def proportion_confint(count, nobs, alpha=0.05, method='normal'):
'''confidence interval for a binomial proportion
Parameters
----------
count : int or array_like
number of successes, can be pandas Series or DataFrame
nobs : int
total number of trials
alpha : float in (0, 1)
significance level, default 0.05
method : {'normal', 'agresti_coull', 'beta', 'wilson', 'jeffreys', 'binom_test'}
default: 'normal'
method to use for confidence interval,
currently available methods :
- `normal` : asymptotic normal approximation
- `agresti_coull` : Agresti-Coull interval
- `beta` : Clopper-Pearson interval based on Beta distribution
- `wilson` : Wilson Score interval
- `jeffreys` : Jeffreys Bayesian Interval
- `binom_test` : experimental, inversion of binom_test
Returns
-------
ci_low, ci_upp : float, ndarray, or pandas Series or DataFrame
lower and upper confidence level with coverage (approximately) 1-alpha.
When a pandas object is returned, then the index is taken from the
`count`.
Notes
-----
Beta, the Clopper-Pearson exact interval has coverage at least 1-alpha,
but is in general conservative. Most of the other methods have average
coverage equal to 1-alpha, but will have smaller coverage in some cases.
The 'beta' and 'jeffreys' interval are central, they use alpha/2 in each
tail, and alpha is not adjusted at the boundaries. In the extreme case
when `count` is zero or equal to `nobs`, then the coverage will be only
1 - alpha/2 in the case of 'beta'.
The confidence intervals are clipped to be in the [0, 1] interval in the
case of 'normal' and 'agresti_coull'.
Method "binom_test" directly inverts the binomial test in scipy.stats.
which has discrete steps.
TODO: binom_test intervals raise an exception in small samples if one
interval bound is close to zero or one.
References
----------
https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval
<NAME>.; <NAME>; <NAME> (2001). "Interval
Estimation for a Binomial Proportion",
Statistical Science 16 (2): 101–133. doi:10.1214/ss/1009213286.
TODO: Is this the correct one ?
'''
pd_index = getattr(count, 'index', None)
if pd_index is not None and callable(pd_index):
# this rules out lists, lists have an index method
pd_index = None
count = np.asarray(count)
nobs = np.asarray(nobs)
q_ = count * 1. / nobs
alpha_2 = 0.5 * alpha
if method == 'normal':
std_ = np.sqrt(q_ * (1 - q_) / nobs)
dist = stats.norm.isf(alpha / 2.) * std_
ci_low = q_ - dist
ci_upp = q_ + dist
elif method == 'binom_test':
# inverting the binomial test
def func(qi):
return stats.binom_test(q_ * nobs, nobs, p=qi) - alpha
if count == 0:
ci_low = 0
else:
ci_low = optimize.brentq(func, float_info.min, q_)
if count == nobs:
ci_upp = 1
else:
ci_upp = optimize.brentq(func, q_, 1. - float_info.epsilon)
elif method == 'beta':
ci_low = stats.beta.ppf(alpha_2, count, nobs - count + 1)
ci_upp = stats.beta.isf(alpha_2, count + 1, nobs - count)
if np.ndim(ci_low) > 0:
ci_low[q_ == 0] = 0
ci_upp[q_ == 1] = 1
else:
ci_low = ci_low if (q_ != 0) else 0
ci_upp = ci_upp if (q_ != 1) else 1
elif method == 'agresti_coull':
crit = stats.norm.isf(alpha / 2.)
nobs_c = nobs + crit**2
q_c = (count + crit**2 / 2.) / nobs_c
std_c = np.sqrt(q_c * (1. - q_c) / nobs_c)
dist = crit * std_c
ci_low = q_c - dist
ci_upp = q_c + dist
elif method == 'wilson':
crit = stats.norm.isf(alpha / 2.)
crit2 = crit**2
denom = 1 + crit2 / nobs
center = (q_ + crit2 / (2 * nobs)) / denom
dist = crit * np.sqrt(q_ * (1. - q_) / nobs + crit2 / (4. * nobs**2))
dist /= denom
ci_low = center - dist
ci_upp = center + dist
# method adjusted to be more forgiving of misspellings or incorrect option name
elif method[:4] == 'jeff':
ci_low, ci_upp = stats.beta.interval(1 - alpha, count + 0.5,
nobs - count + 0.5)
else:
raise NotImplementedError('method "%s" is not available' % method)
if method in ['normal', 'agresti_coull']:
ci_low = np.clip(ci_low, 0, 1)
ci_upp = np.clip(ci_upp, 0, 1)
if pd_index is not None and np.ndim(ci_low) > 0:
import pandas as pd
if np.ndim(ci_low) == 1:
ci_low = pd.Series(ci_low, index=pd_index)
ci_upp = pd.Series(ci_upp, index=pd_index)
if np.ndim(ci_low) == 2:
ci_low = pd.DataFrame(ci_low, index=pd_index)
ci_upp = pd.DataFrame(ci_upp, index=pd_index)
return ci_low, ci_upp
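# Hedged usage sketch (illustrative only; `_proportion_confint_example` is not
# part of the original module). Values in the comment are approximate.
def _proportion_confint_example():
    # Wilson score interval for 5 successes out of 50 trials at the 95% level
    low, upp = proportion_confint(5, 50, alpha=0.05, method='wilson')
    return low, upp  # roughly (0.043, 0.214)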
def multinomial_proportions_confint(counts, alpha=0.05, method='goodman'):
'''Confidence intervals for multinomial proportions.
Parameters
----------
counts : array_like of int, 1-D
Number of observations in each category.
alpha : float in (0, 1), optional
Significance level, defaults to 0.05.
method : {'goodman', 'sison-glaz'}, optional
Method to use to compute the confidence intervals; available methods
are:
- `goodman`: based on a chi-squared approximation, valid if all
values in `counts` are greater or equal to 5 [2]_
- `sison-glaz`: less conservative than `goodman`, but only valid if
`counts` has 7 or more categories (``len(counts) >= 7``) [3]_
Returns
-------
confint : ndarray, 2-D
Array of [lower, upper] confidence levels for each category, such that
overall coverage is (approximately) `1-alpha`.
Raises
------
ValueError
If `alpha` is not in `(0, 1)` (bounds excluded), or if the values in
`counts` are not all positive or null.
NotImplementedError
If `method` is not known.
Exception
When ``method == 'sison-glaz'``, if for some reason `c` cannot be
computed; this signals a bug and should be reported.
Notes
-----
The `goodman` method [2]_ is based on approximating a statistic based on
the multinomial as a chi-squared random variable. The usual recommendation
is that this is valid if all the values in `counts` are greater than or
equal to 5. There is no condition on the number of categories for this
method.
The `sison-glaz` method [3]_ approximates the multinomial probabilities,
and evaluates that with a maximum-likelihood estimator. The first
approximation is an Edgeworth expansion that converges when the number of
categories goes to infinity, and the maximum-likelihood estimator converges
when the number of observations (``sum(counts)``) goes to infinity. In
their paper, Sison & Glaz demo their method with at least 7 categories, so
``len(counts) >= 7`` with all values in `counts` at or above 5 can be used
as a rule of thumb for the validity of this method. This method is less
conservative than the `goodman` method (i.e. it will yield confidence
intervals closer to the desired significance level), but produces
confidence intervals of uniform width over all categories (except when the
intervals reach 0 or 1, in which case they are truncated), which makes it
most useful when proportions are of similar magnitude.
Aside from the original sources ([1]_, [2]_, and [3]_), the implementation
uses the formulas (though not the code) presented in [4]_ and [5]_.
References
----------
.. [1] <NAME>, "A representation for multinomial cumulative
distribution functions," The Annals of Statistics, Vol. 9, No. 5,
1981, pp. 1123-1126.
.. [2] <NAME>., "On simultaneous confidence intervals for multinomial
proportions," Technometrics, Vol. 7, No. 2, 1965, pp. 247-254.
.. [3] Sison, <NAME>., and <NAME>, "Simultaneous Confidence
Intervals and Sample Size Determination for Multinomial
Proportions," Journal of the American Statistical Association,
Vol. 90, No. 429, 1995, pp. 366-369.
.. [4] May, <NAME>., and <NAME>, "A SAS® macro for
constructing simultaneous confidence intervals for multinomial
proportions," Computer methods and programs in Biomedicine, Vol. 53,
No. 3, 1997, pp. 153-162.
.. [5] May, <NAME>., and <NAME>, "Constructing two-sided
simultaneous confidence intervals for multinomial proportions for
small counts in a large number of cells," Journal of Statistical
Software, Vol. 5, No. 6, 2000, pp. 1-24.
'''
if alpha <= 0 or alpha >= 1:
raise ValueError('alpha must be in (0, 1), bounds excluded')
counts = np.array(counts, dtype=float)
if (counts < 0).any():
raise ValueError('counts must be >= 0')
n = counts.sum()
k = len(counts)
proportions = counts / n
if method == 'goodman':
chi2 = stats.chi2.ppf(1 - alpha / k, 1)
delta = chi2 ** 2 + (4 * n * proportions * chi2 * (1 - proportions))
region = ((2 * n * proportions + chi2 +
np.array([- np.sqrt(delta), np.sqrt(delta)])) /
(2 * (chi2 + n))).T
elif method[:5] == 'sison': # We accept any name starting with 'sison'
# Define a few functions we'll use a lot.
def poisson_interval(interval, p):
"""Compute P(b <= Z <= a) where Z ~ Poisson(p) and
`interval = (b, a)`."""
b, a = interval
prob = stats.poisson.cdf(a, p) - stats.poisson.cdf(b - 1, p)
if p == 0 and np.isnan(prob):
# hack for older scipy <=0.16.1
return int(b - 1 < 0)
return prob
def truncated_poisson_factorial_moment(interval, r, p):
"""Compute mu_r, the r-th factorial moment of a poisson random
variable of parameter `p` truncated to `interval = (b, a)`."""
b, a = interval
return p ** r * (1 - ((poisson_interval((a - r + 1, a), p) -
poisson_interval((b - r, b - 1), p)) /
poisson_interval((b, a), p)))
def edgeworth(intervals):
"""Compute the Edgeworth expansion term of Sison & Glaz's formula
(1) (approximated probability for multinomial proportions in a
given box)."""
# Compute means and central moments of the truncated poisson
# variables.
mu_r1, mu_r2, mu_r3, mu_r4 = [
np.array([truncated_poisson_factorial_moment(interval, r, p)
for (interval, p) in zip(intervals, counts)])
for r in range(1, 5)
]
mu = mu_r1
mu2 = mu_r2 + mu - mu ** 2
mu3 = mu_r3 + mu_r2 * (3 - 3 * mu) + mu - 3 * mu ** 2 + 2 * mu ** 3
mu4 = (mu_r4 + mu_r3 * (6 - 4 * mu) +
mu_r2 * (7 - 12 * mu + 6 * mu ** 2) +
mu - 4 * mu ** 2 + 6 * mu ** 3 - 3 * mu ** 4)
# Compute expansion factors, gamma_1 and gamma_2.
g1 = mu3.sum() / mu2.sum() ** 1.5
g2 = (mu4.sum() - 3 * (mu2 ** 2).sum()) / mu2.sum() ** 2
# Compute the expansion itself.
x = (n - mu.sum()) / np.sqrt(mu2.sum())
phi = np.exp(- x ** 2 / 2) / np.sqrt(2 * np.pi)
H3 = x ** 3 - 3 * x
H4 = x ** 4 - 6 * x ** 2 + 3
H6 = x ** 6 - 15 * x ** 4 + 45 * x ** 2 - 15
f = phi * (1 + g1 * H3 / 6 + g2 * H4 / 24 + g1 ** 2 * H6 / 72)
return f / np.sqrt(mu2.sum())
def approximated_multinomial_interval(intervals):
"""Compute approximated probability for Multinomial(n, proportions)
to be in `intervals` (Sison & Glaz's formula (1))."""
return np.exp(
np.sum(np.log([poisson_interval(interval, p)
for (interval, p) in zip(intervals, counts)])) +
np.log(edgeworth(intervals)) -
np.log(stats.poisson._pmf(n, n))
)
def nu(c):
"""Compute interval coverage for a given `c` (Sison & Glaz's
formula (7))."""
return approximated_multinomial_interval(
[(np.maximum(count - c, 0), np.minimum(count + c, n))
for count in counts])
# Find the value of `c` that will give us the confidence intervals
# (solving nu(c) <= 1 - alpha < nu(c + 1).
c = 1.0
nuc = nu(c)
nucp1 = nu(c + 1)
while not (nuc <= (1 - alpha) < nucp1):
if c > n:
raise Exception("Couldn't find a value for `c` that "
"solves nu(c) <= 1 - alpha < nu(c + 1)")
c += 1
nuc = nucp1
nucp1 = nu(c + 1)
# Compute gamma and the corresponding confidence intervals.
g = (1 - alpha - nuc) / (nucp1 - nuc)
ci_lower = np.maximum(proportions - c / n, 0)
ci_upper = np.minimum(proportions + (c + 2 * g) / n, 1)
region = np.array([ci_lower, ci_upper]).T
else:
raise NotImplementedError('method "%s" is not available' % method)
return region
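# Hedged usage sketch (illustrative only; `_multinomial_confint_example` is not
# part of the original module).
def _multinomial_confint_example():
    # Goodman simultaneous intervals for three categories with counts 20, 30, 50
    confint = multinomial_proportions_confint([20, 30, 50], alpha=0.05,
                                               method='goodman')
    return confint  # (3, 2) array of [lower, upper] bounds per category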
def samplesize_confint_proportion(proportion, half_length, alpha=0.05,
method='normal'):
'''find sample size to get desired confidence interval length
Parameters
----------
proportion : float in (0, 1)
proportion or quantile
half_length : float in (0, 1)
desired half length of the confidence interval
alpha : float in (0, 1)
significance level, default 0.05,
coverage of the two-sided interval is (approximately) ``1 - alpha``
method : str in ['normal']
method to use for confidence interval,
currently only normal approximation
Returns
-------
n : float
sample size to get the desired half length of the confidence interval
Notes
-----
this is mainly to store the formula.
possible application: number of replications in bootstrap samples
'''
q_ = proportion
if method == 'normal':
n = q_ * (1 - q_) / (half_length / stats.norm.isf(alpha / 2.))**2
else:
raise NotImplementedError('only "normal" is available')
return n
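# Hedged worked example (illustrative only; `_samplesize_example` is not part of
# the original module): a proportion near 0.5 with a desired half width of 0.05
# at alpha=0.05 needs roughly 384 observations under the normal approximation.
def _samplesize_example():
    n = samplesize_confint_proportion(0.5, 0.05, alpha=0.05)
    return n  # approximately 384.1; round up in practice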
def proportion_effectsize(prop1, prop2, method='normal'):
'''
Effect size for a test comparing two proportions
for use in power function
Parameters
----------
prop1, prop2 : float or array_like
The proportion value(s).
Returns
-------
es : float or ndarray
effect size for (transformed) prop1 - prop2
Notes
-----
only method='normal' is implemented to match pwr.p2.test
see http://www.statmethods.net/stats/power.html
Effect size for `normal` is defined as ::
2 * (arcsin(sqrt(prop1)) - arcsin(sqrt(prop2)))
I think other conversions to normality can be used, but I need to check.
Examples
--------
>>> import statsmodels.api as sm
>>> sm.stats.proportion_effectsize(0.5, 0.4)
0.20135792079033088
>>> sm.stats.proportion_effectsize([0.3, 0.4, 0.5], 0.4)
array([-0.21015893, 0. , 0.20135792])
'''
if method != 'normal':
raise ValueError('only "normal" is implemented')
es = 2 * (np.arcsin(np.sqrt(prop1)) - np.arcsin(np.sqrt(prop2)))
return es
def std_prop(prop, nobs):
'''standard error for the estimate of a proportion
This is just ``np.sqrt(p * (1. - p) / nobs)``
Parameters
----------
prop : array_like
proportion
nobs : int, array_like
number of observations
Returns
-------
std : array_like
standard error for a proportion of nobs independent observations
'''
return np.sqrt(prop * (1. - prop) / nobs)
def _std_diff_prop(p1, p2, ratio=1):
return np.sqrt(p1 * (1 - p1) + p2 * (1 - p2) / ratio)
def _power_ztost(mean_low, var_low, mean_upp, var_upp, mean_alt, var_alt,
alpha=0.05, discrete=True, dist='norm', nobs=None,
continuity=0, critval_continuity=0):
'''Generic statistical power function for normal based equivalence test
This includes options to adjust the normal approximation and can use
the binomial to evaluate the probability of the rejection region
see power_ztost_prob for a description of the options
'''
# TODO: refactor structure, separate norm and binom better
if not isinstance(continuity, tuple):
continuity = (continuity, continuity)
crit = stats.norm.isf(alpha)
k_low = mean_low + np.sqrt(var_low) * crit
k_upp = mean_upp - np.sqrt(var_upp) * crit
if discrete or dist == 'binom':
k_low = np.ceil(k_low * nobs + 0.5 * critval_continuity)
k_upp = np.trunc(k_upp * nobs - 0.5 * critval_continuity)
if dist == 'norm':
#need proportion
k_low = (k_low) * 1. / nobs #-1 to match PASS
k_upp = k_upp * 1. / nobs
# else:
# if dist == 'binom':
# #need counts
# k_low *= nobs
# k_upp *= nobs
#print mean_low, np.sqrt(var_low), crit, var_low
#print mean_upp, np.sqrt(var_upp), crit, var_upp
if np.any(k_low > k_upp): #vectorize
import warnings
warnings.warn("no overlap, power is zero", HypothesisTestWarning)
std_alt = np.sqrt(var_alt)
z_low = (k_low - mean_alt - continuity[0] * 0.5 / nobs) / std_alt
z_upp = (k_upp - mean_alt + continuity[1] * 0.5 / nobs) / std_alt
if dist == 'norm':
power = stats.norm.cdf(z_upp) - stats.norm.cdf(z_low)
elif dist == 'binom':
power = (stats.binom.cdf(k_upp, nobs, mean_alt) -
stats.binom.cdf(k_low-1, nobs, mean_alt))
return power, (k_low, k_upp, z_low, z_upp)
def binom_tost(count, nobs, low, upp):
'''exact TOST test for one proportion using binomial distribution
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials.
nobs : int
the number of trials or observations.
low, upp : floats
lower and upper limit of equivalence region
Returns
-------
pvalue : float
p-value of equivalence test
pval_low, pval_upp : floats
p-values of lower and upper one-sided tests
'''
# binom_test_stat only returns pval
tt1 = binom_test(count, nobs, alternative='larger', prop=low)
tt2 = binom_test(count, nobs, alternative='smaller', prop=upp)
return np.maximum(tt1, tt2), tt1, tt2,
def binom_tost_reject_interval(low, upp, nobs, alpha=0.05):
'''rejection region for binomial TOST
The interval includes the end points,
`reject` if and only if `r_low <= x <= r_upp`.
The interval might be empty with `r_upp < r_low`.
Parameters
----------
low, upp : floats
lower and upper limit of equivalence region
nobs : int
the number of trials or observations.
Returns
-------
x_low, x_upp : float
lower and upper bound of rejection region
'''
x_low = stats.binom.isf(alpha, nobs, low) + 1
x_upp = stats.binom.ppf(alpha, nobs, upp) - 1
return x_low, x_upp
def binom_test_reject_interval(value, nobs, alpha=0.05, alternative='two-sided'):
'''rejection region for binomial test for one sample proportion
The interval includes the end points of the rejection region.
Parameters
----------
value : float
proportion under the Null hypothesis
nobs : int
the number of trials or observations.
Returns
-------
x_low, x_upp : float
lower and upper bound of rejection region
'''
if alternative in ['2s', 'two-sided']:
alternative = '2s' # normalize alternative name
alpha = alpha / 2
if alternative in ['2s', 'smaller']:
x_low = stats.binom.ppf(alpha, nobs, value) - 1
else:
x_low = 0
if alternative in ['2s', 'larger']:
x_upp = stats.binom.isf(alpha, nobs, value) + 1
else :
x_upp = nobs
return x_low, x_upp
def binom_test(count, nobs, prop=0.5, alternative='two-sided'):
'''Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials.
nobs : int
the number of trials or observations.
prop : float, optional
The probability of success under the null hypothesis,
`0 <= prop <= 1`. The default value is `prop = 0.5`
alternative : str in ['two-sided', 'smaller', 'larger']
alternative hypothesis, which can be two-sided or either one of the
one-sided tests.
Returns
-------
p-value : float
The p-value of the hypothesis test
Notes
-----
This uses scipy.stats.binom_test for the two-sided alternative.
'''
if np.any(prop > 1.0) or np.any(prop < 0.0):
raise ValueError("p must be in range [0,1]")
if alternative in ['2s', 'two-sided']:
pval = stats.binom_test(count, n=nobs, p=prop)
elif alternative in ['l', 'larger']:
pval = stats.binom.sf(count-1, nobs, prop)
elif alternative in ['s', 'smaller']:
pval = stats.binom.cdf(count, nobs, prop)
else:
raise ValueError('alternative not recognized\n'
'should be two-sided, larger or smaller')
return pval
def power_binom_tost(low, upp, nobs, p_alt=None, alpha=0.05):
if p_alt is None:
p_alt = 0.5 * (low + upp)
x_low, x_upp = binom_tost_reject_interval(low, upp, nobs, alpha=alpha)
power = (stats.binom.cdf(x_upp, nobs, p_alt) -
stats.binom.cdf(x_low-1, nobs, p_alt))
return power
def power_ztost_prop(low, upp, nobs, p_alt, alpha=0.05, dist='norm',
variance_prop=None, discrete=True, continuity=0,
critval_continuity=0):
'''Power of proportions equivalence test based on normal distribution
Parameters
----------
low, upp : floats
lower and upper limit of equivalence region
nobs : int
number of observations
p_alt : float in (0,1)
proportion under the alternative
alpha : float in (0,1)
significance level of the test
dist : str in ['norm', 'binom']
This defines the distribution to evaluate the power of the test. The
critical values of the TOST test are always based on the normal
approximation, but the distribution for the power can be either the
normal (default) or the binomial (exact) distribution.
variance_prop : None or float in (0,1)
If this is None, then the variances for the two one sided tests are
based on the proportions equal to the equivalence limits.
If variance_prop is given, then it is used to calculate the variance
for the TOST statistics. If this is based on a sample, then the
estimated proportion can be used.
discrete : bool
If true, then the critical values of the rejection region are converted
to integers. If dist is "binom", this is automatically assumed.
If discrete is false, then the TOST critical values are used as
floating point numbers, and the power is calculated based on the
rejection region that is not discretized.
continuity : bool or float
adjust the rejection region for the normal power probability. This has
an effect only if ``dist='norm'``
critval_continuity : bool or float
If this is non-zero, then the critical values of the tost rejection
region are adjusted before converting to integers. This affects both
distributions, ``dist='norm'`` and ``dist='binom'``.
Returns
-------
power : float
statistical power of the equivalence test.
(k_low, k_upp, z_low, z_upp) : tuple of floats
critical limits in intermediate steps
temporary return, will be changed
Notes
-----
In small samples the power for the ``discrete`` version has a sawtooth
pattern as a function of the number of observations. As a consequence,
small changes in the number of observations or in the normal approximation
can have a large effect on the power.
``continuity`` and ``critval_continuity`` are added to match some results
of PASS, and are mainly to investigate the sensitivity of the ztost power
to small changes in the rejection region. From my interpretation of the
equations in the SAS manual, both are zero in SAS.
works vectorized
**verification:**
The ``dist='binom'`` results match PASS,
The ``dist='norm'`` results look reasonable, but no benchmark is available.
References
----------
SAS Manual: Chapter 68: The Power Procedure, Computational Resources
PASS Chapter 110: Equivalence Tests for One Proportion.
'''
mean_low = low
var_low = std_prop(low, nobs)**2
mean_upp = upp
var_upp = std_prop(upp, nobs)**2
mean_alt = p_alt
var_alt = std_prop(p_alt, nobs)**2
if variance_prop is not None:
var_low = var_upp = std_prop(variance_prop, nobs)**2
power = _power_ztost(mean_low, var_low, mean_upp, var_upp, mean_alt, var_alt,
alpha=alpha, discrete=discrete, dist=dist, nobs=nobs,
continuity=continuity, critval_continuity=critval_continuity)
return np.maximum(power[0], 0), power[1:]
def _table_proportion(count, nobs):
'''create a k by 2 contingency table for proportion
helper function for proportions_chisquare
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials.
nobs : int
the number of trials or observations.
Returns
-------
table : ndarray
(k, 2) contingency table
Notes
-----
recent scipy has more elaborate contingency table functions
'''
table = np.column_stack((count, nobs - count))
expected = table.sum(0) * table.sum(1)[:,None] * 1. / table.sum()
n_rows = table.shape[0]
return table, expected, n_rows
def proportions_ztest(count, nobs, value=None, alternative='two-sided',
prop_var=False):
"""
Test for proportions based on normal (z) test
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials. If this is array_like, then
the assumption is that this represents the number of successes for
each independent sample
nobs : {int, array_like}
the number of trials or observations, with the same length as
count.
value : float, array_like or None, optional
This is the value of the null hypothesis equal to the proportion in the
case of a one sample test. In the case of a two-sample test, the
null hypothesis is that prop[0] - prop[1] = value, where prop is the
proportion in the two samples. If not provided value = 0 and the null
is prop[0] = prop[1]
alternative : str in ['two-sided', 'smaller', 'larger']
The alternative hypothesis can be either two-sided or one of the one-
sided tests, smaller means that the alternative hypothesis is
``prop < value`` and larger means ``prop > value``. In the two sample
test, smaller means that the alternative hypothesis is ``p1 < p2`` and
larger means ``p1 > p2`` where ``p1`` is the proportion of the first
sample and ``p2`` of the second one.
prop_var : False or float in (0, 1)
If prop_var is false, then the variance of the proportion estimate is
calculated based on the sample proportion. Alternatively, a proportion
can be specified to calculate this variance. Common use case is to
use the proportion under the Null hypothesis to specify the variance
of the proportion estimate.
Returns
-------
zstat : float
test statistic for the z-test
p-value : float
p-value for the z-test
Examples
--------
>>> count = 5
>>> nobs = 83
>>> value = .05
>>> stat, pval = proportions_ztest(count, nobs, value)
>>> print('{0:0.3f}'.format(pval))
0.695
>>> import numpy as np
>>> from statsmodels.stats.proportion import proportions_ztest
>>> count = np.array([5, 12])
>>> nobs = np.array([83, 99])
>>> stat, pval = proportions_ztest(count, nobs)
>>> print('{0:0.3f}'.format(pval))
0.159
Notes
-----
This uses a simple normal test for proportions. It should be the same as
running the mean z-test on the data encoded 1 for event and 0 for no event
so that the sum corresponds to the count.
In the one and two sample cases with two-sided alternative, this test
produces the same p-value as ``proportions_chisquare``, since the
chisquare is the distribution of the square of a standard normal
distribution.
"""
# TODO: verify that this really holds
# TODO: add continuity correction or other improvements for small samples
# TODO: change options similar to proportions_ztost ?
count = np.asarray(count)
nobs = np.asarray(nobs)
if nobs.size == 1:
nobs = nobs * np.ones_like(count)
prop = count * 1. / nobs
k_sample = np.size(prop)
if value is None:
if k_sample == 1:
raise ValueError('value must be provided for a 1-sample test')
value = 0
if k_sample == 1:
diff = prop - value
elif k_sample == 2:
diff = prop[0] - prop[1] - value
else:
msg = 'more than two samples are not implemented yet'
raise NotImplementedError(msg)
p_pooled = np.sum(count) * 1. / np.sum(nobs)
nobs_fact = np.sum(1. / nobs)
if prop_var:
p_pooled = prop_var
var_ = p_pooled * (1 - p_pooled) * nobs_fact
std_diff = np.sqrt(var_)
from statsmodels.stats.weightstats import _zstat_generic2
return _zstat_generic2(diff, std_diff, alternative)
def proportions_ztost(count, nobs, low, upp, prop_var='sample'):
'''Equivalence test based on normal distribution
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials. If this is array_like, then
the assumption is that this represents the number of successes for
each independent sample
nobs : int
the number of trials or observations, with the same length as
count.
low, upp : float
equivalence interval low < prop1 - prop2 < upp
prop_var : str or float in (0, 1)
prop_var determines which proportion is used for the calculation
of the standard deviation of the proportion estimate
The available options for string are 'sample' (default), 'null' and
'limits'. If prop_var is a float, then it is used directly.
Returns
-------
pvalue : float
pvalue of the non-equivalence test
t1, pv1 : tuple of floats
test statistic and pvalue for lower threshold test
t2, pv2 : tuple of floats
test statistic and pvalue for upper threshold test
Notes
-----
checked only for 1 sample case
'''
if prop_var == 'limits':
prop_var_low = low
prop_var_upp = upp
elif prop_var == 'sample':
prop_var_low = prop_var_upp = False #ztest uses sample
elif prop_var == 'null':
prop_var_low = prop_var_upp = 0.5 * (low + upp)
elif np.isreal(prop_var):
prop_var_low = prop_var_upp = prop_var
tt1 = proportions_ztest(count, nobs, alternative='larger',
prop_var=prop_var_low, value=low)
tt2 = proportions_ztest(count, nobs, alternative='smaller',
prop_var=prop_var_upp, value=upp)
return np.maximum(tt1[1], tt2[1]), tt1, tt2,
def proportions_chisquare(count, nobs, value=None):
'''test for proportions based on chisquare test
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials. If this is array_like, then
the assumption is that this represents the number of successes for
each independent sample
nobs : int
the number of trials or observations, with the same length as
count.
value : None or float or array_like
Returns
-------
chi2stat : float
test statistic for the chisquare test
p-value : float
p-value for the chisquare test
(table, expected)
table is a (k, 2) contingency table, ``expected`` is the corresponding
table of counts that are expected under independence with given
margins
Notes
-----
Recent version of scipy.stats have a chisquare test for independence in
contingency tables.
This function provides a similar interface to chisquare tests as
``prop.test`` in R, however without the option for Yates continuity
correction.
count can be the count for the number of events for a single proportion,
or the counts for several independent proportions. If value is given, then
all proportions are jointly tested against this value. If value is not
given and count and nobs are not scalar, then the null hypothesis is
that all samples have the same proportion.
'''
nobs = np.atleast_1d(nobs)
table, expected, n_rows = _table_proportion(count, nobs)
if value is not None:
expected = np.column_stack((nobs * value, nobs * (1 - value)))
ddof = n_rows - 1
else:
ddof = n_rows
#print table, expected
chi2stat, pval = stats.chisquare(table.ravel(), expected.ravel(),
ddof=ddof)
return chi2stat, pval, (table, expected)
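# Hedged usage sketch (illustrative only; `_proportions_chisquare_example` is
# not part of the original module). It reuses the two-sample data shown in the
# proportions_ztest docstring above.
def _proportions_chisquare_example():
    count = np.array([5, 12])
    nobs = np.array([83, 99])
    chi2stat, pval, (table, expected) = proportions_chisquare(count, nobs)
    return chi2stat, pval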
def proportions_chisquare_allpairs(count, nobs, multitest_method='hs'):
'''chisquare test of proportions for all pairs of k samples
Performs a chisquare test for proportions for all pairwise comparisons.
The alternative is two-sided
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials.
nobs : int
the number of trials or observations.
prop : float, optional
The probability of success under the null hypothesis,
`0 <= prop <= 1`. The default value is `prop = 0.5`
multitest_method : str
This chooses the method for the multiple testing p-value correction,
that is used as default in the results.
It can be any method that is available in ``multipletesting``.
The default is Holm-Sidak 'hs'.
Returns
-------
result : AllPairsResults instance
The returned results instance has several statistics, such as p-values,
attached, and additional methods for using a non-default
``multitest_method``.
Notes
-----
Yates continuity correction is not available.
'''
#all_pairs = lmap(list, lzip(*np.triu_indices(4, 1)))
all_pairs = lzip(*np.triu_indices(len(count), 1))
pvals = [proportions_chisquare(count[list(pair)], nobs[list(pair)])[1]
for pair in all_pairs]
return AllPairsResults(pvals, all_pairs, multitest_method=multitest_method)
def proportions_chisquare_pairscontrol(count, nobs, value=None,
multitest_method='hs', alternative='two-sided'):
'''chisquare test of proportions for pairs of k samples compared to control
Performs a chisquare test for proportions for pairwise comparisons with a
control (Dunnet's test). The control is assumed to be the first element
of ``count`` and ``nobs``. The alternative is two-sided, larger or
smaller.
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials.
nobs : int
the number of trials or observations.
prop : float, optional
The probability of success under the null hypothesis,
`0 <= prop <= 1`. The default value is `prop = 0.5`
multitest_method : str
This chooses the method for the multiple testing p-value correction,
that is used as default in the results.
It can be any method that is available in ``multipletesting``.
The default is Holm-Sidak 'hs'.
alternative : str in ['two-sided', 'smaller', 'larger']
alternative hypothesis, which can be two-sided or either one of the
one-sided tests.
Returns
-------
result : AllPairsResults instance
The returned results instance has several statistics, such as p-values,
attached, and additional methods for using a non-default
``multitest_method``.
Notes
-----
Yates continuity correction is not available.
``value`` and ``alternative`` options are not yet implemented.
'''
if (value is not None) or (alternative not in ['two-sided', '2s']):
raise NotImplementedError
#all_pairs = lmap(list, lzip(*np.triu_indices(4, 1)))
all_pairs = [(0, k) for k in range(1, len(count))]
pvals = [proportions_chisquare(count[list(pair)], nobs[list(pair)],
#alternative=alternative)[1]
)[1]
for pair in all_pairs]
return AllPairsResults(pvals, all_pairs, multitest_method=multitest_method)
def confint_proportions_2indep(count1, nobs1, count2, nobs2, method=None,
compare='diff', alpha=0.05, correction=True):
"""Confidence intervals for comparing two independent proportions
This assumes that we have two independent binomial samples.
Parameters
----------
count1, nobs1 :
count and sample size for first sample
count2, nobs2 :
count and sample size for the second sample
method : string
method for computing confidence interval. If method is None, then a
default method is used. The default might change as more methods are
added.
diff:
- 'wald',
- 'agresti-caffo'
- 'newcomb' (default)
- 'score'
ratio:
- 'log'
- 'log-adjusted' (default)
- 'score'
odds-ratio:
- 'logit'
- 'logit-adjusted' (default)
- 'score'
compare : string in ['diff', 'ratio' 'odds-ratio']
If compare is diff, then the confidence interval is for diff = p1 - p2.
If compare is ratio, then the confidence interval is for the risk ratio
defined by ratio = p1 / p2.
If compare is odds-ratio, then the confidence interval is for the
odds-ratio defined by or = p1 / (1 - p1) / (p2 / (1 - p2))
alpha : float
significance level for the confidence interval, default is 0.05.
The nominal coverage probability is 1 - alpha.
Returns
-------
low, upp
Notes
-----
Status: experimental, API and defaults might still change.
more ``methods`` will be added.
"""
method_default = {'diff': 'newcomb',
'ratio': 'log-adjusted',
'odds-ratio': 'logit-adjusted'}
# normalize compare name
if compare.lower() == 'or':
compare = 'odds-ratio'
if method is None:
method = method_default[compare]
method = method.lower()
if method.startswith('agr'):
method = 'agresti-caffo'
p1 = count1 / nobs1
p2 = count2 / nobs2
diff = p1 - p2
addone = 1 if method == 'agresti-caffo' else 0
if compare == 'diff':
if method in ['wald', 'agresti-caffo']:
count1_, nobs1_ = count1 + addone, nobs1 + 2 * addone
count2_, nobs2_ = count2 + addone, nobs2 + 2 * addone
p1_ = count1_ / nobs1_
p2_ = count2_ / nobs2_
diff_ = p1_ - p2_
var = p1_ * (1 - p1_) / nobs1_ + p2_ * (1 - p2_) / nobs2_
z = stats.norm.isf(alpha / 2)
d_wald = z * np.sqrt(var)
low = diff_ - d_wald
upp = diff_ + d_wald
elif method.startswith('newcomb'):
low1, upp1 = proportion_confint(count1, nobs1,
method='wilson', alpha=alpha)
low2, upp2 = proportion_confint(count2, nobs2,
method='wilson', alpha=alpha)
d_low = np.sqrt((p1 - low1)**2 + (upp2 - p2)**2)
d_upp = np.sqrt((p2 - low2)**2 + (upp1 - p1)**2)
low = diff - d_low
upp = diff + d_upp
elif method == "score":
low, upp = score_confint_inversion(count1, nobs1, count2, nobs2,
compare=compare, alpha=alpha,
correction=correction)
else:
raise ValueError('method not recognized')
elif compare == 'ratio':
# ratio = p1 / p2
if method in ['log', 'log-adjusted']:
addhalf = 0.5 if method == 'log-adjusted' else 0
count1_, nobs1_ = count1 + addhalf, nobs1 + addhalf
count2_, nobs2_ = count2 + addhalf, nobs2 + addhalf
p1_ = count1_ / nobs1_
p2_ = count2_ / nobs2_
ratio_ = p1_ / p2_
var = (1 / count1_) - 1 / nobs1_ + 1 / count2_ - 1 / nobs2_
z = stats.norm.isf(alpha / 2)
d_log = z * np.sqrt(var)
low = np.exp(np.log(ratio_) - d_log)
upp = np.exp(np.log(ratio_) + d_log)
elif method == 'score':
res = _confint_riskratio_koopman(count1, nobs1, count2, nobs2,
alpha=alpha,
correction=correction)
low, upp = res.confint
else:
raise ValueError('method not recognized')
elif compare == 'odds-ratio':
# odds_ratio = p1 / (1 - p1) / p2 * (1 - p2)
if method in ['logit', 'logit-adjusted', 'logit-smoothed']:
if method in ['logit-smoothed']:
adjusted = _shrink_prob(count1, nobs1, count2, nobs2,
shrink_factor=2, return_corr=False)[0]
count1_, nobs1_, count2_, nobs2_ = adjusted
else:
addhalf = 0.5 if method == 'logit-adjusted' else 0
count1_, nobs1_ = count1 + addhalf, nobs1 + 2 * addhalf
count2_, nobs2_ = count2 + addhalf, nobs2 + 2 * addhalf
p1_ = count1_ / nobs1_
p2_ = count2_ / nobs2_
odds_ratio_ = p1_ / (1 - p1_) / p2_ * (1 - p2_)
var = (1 / count1_ + 1 / (nobs1_ - count1_) +
1 / count2_ + 1 / (nobs2_ - count2_))
z = stats.norm.isf(alpha / 2)
d_log = z * np.sqrt(var)
low = np.exp(np.log(odds_ratio_) - d_log)
upp = np.exp(np.log(odds_ratio_) + d_log)
elif method == "score":
low, upp = score_confint_inversion(count1, nobs1, count2, nobs2,
compare=compare, alpha=alpha,
correction=correction)
else:
raise ValueError('method not recognized')
else:
raise ValueError('compare not recognized')
return low, upp
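# Hedged usage sketch (illustrative only; `_confint_2indep_example` is not part
# of the original module).
def _confint_2indep_example():
    # default method for compare='diff' is 'newcomb'
    low, upp = confint_proportions_2indep(7, 34, 1, 34, compare='diff')
    return low, upp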
def _shrink_prob(count1, nobs1, count2, nobs2, shrink_factor=2,
return_corr=True):
"""shrink observed counts towards independence
Helper function for 'logit-smoothed' inference for the odds-ratio of two
independent proportions.
Parameters
----------
count1, nobs1 : float or int
count and sample size for first sample
count2, nobs2 : float or int
count and sample size for the second sample
shrink_factor : float
This corresponds to the number of observations that are added in total
proportional to the probabilities under independence.
return_corr : bool
If true, then only the correction term is returned
If false, then the corrected counts, i.e. original counts plus
correction term, are returned.
Returns
-------
count1_corr, nobs1_corr, count2_corr, nobs2_corr : float
correction or corrected counts
prob_indep :
TODO/Warning : this will change most likely
probabilities under independence, only returned if return_corr is
false.
"""
nobs_col = np.array([count1 + count2, nobs1 - count1 + nobs2 - count2])
nobs_row = np.array([nobs1, nobs2])
nobs = nobs1 + nobs2
prob_indep = (nobs_col * nobs_row[:, None]) / nobs**2
corr = shrink_factor * prob_indep
if return_corr:
return (corr[0, 0], corr[0].sum(), corr[1, 0], corr[1].sum())
else:
return (count1 + corr[0, 0], nobs1 + corr[0].sum(),
count2 + corr[1, 0], nobs2 + corr[1].sum()), prob_indep
def score_test_proportions_2indep(count1, nobs1, count2, nobs2, value=None,
compare='diff', alternative='two-sided',
correction=True, return_results=True):
"""score_test for two independent proportions
This uses the constrained estimate of the proportions to compute
the variance under the Null hypothesis.
Parameters
----------
count1, nobs1 :
count and sample size for first sample
count2, nobs2 :
count and sample size for the second sample
value : float
diff, ratio or odds-ratio under the null hypothesis. If value is None,
then equality of proportions under the Null is assumed,
i.e. value=0 for 'diff' or value=1 for either rate or odds-ratio.
compare : string in ['diff', 'ratio' 'odds-ratio']
If compare is diff, then the confidence interval is for diff = p1 - p2.
If compare is ratio, then the confidence interval is for the risk ratio
defined by ratio = p1 / p2.
If compare is odds-ratio, then the confidence interval is for the
odds-ratio defined by or = p1 / (1 - p1) / (p2 / (1 - p2))
return_results : bool
If true, then a results instance with extra information is returned,
otherwise a tuple with statistic and pvalue is returned.
Returns
-------
results : results instance or tuple
If return_results is True, then a results instance with the
information in attributes is returned.
If return_results is False, then only ``statistic`` and ``pvalue``
are returned.
statistic : float
test statistic asymptotically normal distributed N(0, 1)
pvalue : float
p-value based on normal distribution
other attributes :
additional information about the hypothesis test
Notes
-----
Status: experimental, the type or extra information in the return might
change.
"""
value_default = 0 if compare == 'diff' else 1
if value is None:
# TODO: odds ratio does not work if value=1
value = value_default
nobs = nobs1 + nobs2
count = count1 + count2
p1 = count1 / nobs1
p2 = count2 / nobs2
if value == value_default:
# use pooled estimator if equality test
# shortcut, but required for odds ratio
prop0 = prop1 = count / nobs
# this uses index 0 from <NAME> 1985
count0, nobs0 = count2, nobs2
p0 = p2
if compare == 'diff':
diff = value # hypothesis value
if diff != 0:
tmp3 = nobs
tmp2 = (nobs1 + 2 * nobs0) * diff - nobs - count
tmp1 = (count0 * diff - nobs - 2 * count0) * diff + count
tmp0 = count0 * diff * (1 - diff)
q = ((tmp2 / (3 * tmp3))**3 - tmp1 * tmp2 / (6 * tmp3**2) +
tmp0 / (2 * tmp3))
p = np.sign(q) * np.sqrt((tmp2 / (3 * tmp3))**2 -
tmp1 / (3 * tmp3))
a = (np.pi + np.arccos(q / p**3)) / 3
prop0 = 2 * p * np.cos(a) - tmp2 / (3 * tmp3)
prop1 = prop0 + diff
correction = True
var = prop1 * (1 - prop1) / nobs1 + prop0 * (1 - prop0) / nobs0
if correction:
var *= nobs / (nobs - 1)
diff_stat = (p1 - p0 - diff)
elif compare == 'ratio':
# risk ratio
ratio = value
if ratio != 1:
a = nobs * ratio
b = -(nobs1 * ratio + count1 + nobs2 + count0 * ratio)
c = count
prop0 = (-b - np.sqrt(b**2 - 4 * a * c)) / (2 * a)
prop1 = prop0 * ratio
var = (prop1 * (1 - prop1) / nobs1 +
ratio**2 * prop0 * (1 - prop0) / nobs0)
if correction:
var *= nobs / (nobs - 1)
# NCSS looks incorrect for var, but it is what should be reported
# diff_stat = (p1 / p0 - ratio) # NCSS/PASS
diff_stat = (p1 - ratio * p0) # <NAME>
elif compare in ['or', 'odds-ratio']:
# odds ratio
oratio = value
if oratio != 1:
# Note the constraint estimator does not handle odds-ratio = 1
a = nobs0 * (oratio - 1)
b = nobs1 * oratio + nobs0 - count * (oratio - 1)
c = -count
prop0 = (-b + np.sqrt(b**2 - 4 * a * c)) / (2 * a)
prop1 = prop0 * oratio / (1 + prop0 * (oratio - 1))
var = (1 / (prop1 * (1 - prop1) * nobs1) +
1 / (prop0 * (1 - prop0) * nobs0))
if correction:
var *= nobs / (nobs - 1)
diff_stat = ((p1 - prop1) / (prop1 * (1 - prop1)) -
(p0 - prop0) / (prop0 * (1 - prop0)))
statistic, pvalue = _zstat_generic2(diff_stat, np.sqrt(var),
alternative=alternative)
if return_results:
res = HolderTuple(statistic=statistic,
pvalue=pvalue,
compare=compare,
method='score',
variance=var,
alternative=alternative,
prop1_null=prop1,
prop2_null=prop0,
)
return res
else:
return statistic, pvalue
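# Hedged usage sketch (illustrative only; `_score_test_2indep_example` is not
# part of the original module).
def _score_test_2indep_example():
    # score test for equality of two proportions (value defaults to 0 for 'diff')
    res = score_test_proportions_2indep(7, 34, 1, 34, compare='diff')
    return res.statistic, res.pvalue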
def test_proportions_2indep(count1, nobs1, count2, nobs2, value=None,
method=None, compare='diff',
alternative='two-sided', correction=True,
return_results=True):
"""Hypothesis test for comparing two independent proportions
This assumes that we have two independent binomial samples.
The Null and alternative hypothesis are
for compare = 'diff'
H0: prop1 - prop2 - value = 0
H1: prop1 - prop2 - value != 0 if alternative = 'two-sided'
H1: prop1 - prop2 - value > 0 if alternative = 'larger'
H1: prop1 - prop2 - value < 0 if alternative = 'smaller'
for compare = 'ratio'
H0: prop1 / prop2 - value = 0
H1: prop1 / prop2 - value != 0 if alternative = 'two-sided'
H1: prop1 / prop2 - value > 0 if alternative = 'larger'
H1: prop1 / prop2 - value < 0 if alternative = 'smaller'
for compare = 'odds-ratio'
H0: or - value = 0
H1: or - value != 0 if alternative = 'two-sided'
H1: or - value > 0 if alternative = 'larger'
H1: or - value < 0 if alternative = 'smaller'
where odds-ratio or = prop1 / (1 - prop1) / (prop2 / (1 - prop2))
Parameters
----------
count1, nobs1 :
count and sample size for first sample
count2, nobs2 :
count and sample size for the second sample
method : string
method for computing confidence interval. If method is None, then a
default method is used. The default might change as more methods are
added.
diff:
- 'wald',
- 'agresti-caffo'
- 'score' if correction is True, then this uses the degrees of freedom
correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985
ratio:
- 'log': wald test using log transformation
- 'log-adjusted': wald test using log transformation,
adds 0.5 to counts
- 'score' if correction is True, then this uses the degrees of freedom
correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985
odds-ratio:
- 'logit': wald test using logit transformation
         - 'logit-adjusted': wald test using logit transformation,
adds 0.5 to counts
         - 'logit-smoothed': wald test using logit transformation, biases
cell counts towards independence by adding two observations in
total.
- 'score' if correction is True, then this uses the degrees of freedom
correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985
    compare : string in ['diff', 'ratio', 'odds-ratio']
If compare is `diff`, then the confidence interval is for
diff = p1 - p2.
If compare is `ratio`, then the confidence interval is for the
risk ratio defined by ratio = p1 / p2.
If compare is `odds-ratio`, then the confidence interval is for the
        odds-ratio defined by or = p1 / (1 - p1) / (p2 / (1 - p2))
alternative : string in ['two-sided', 'smaller', 'larger']
alternative hypothesis, which can be two-sided or either one of the
one-sided tests.
correction : bool
If correction is True (default), then the Miettinen and Nurminen
small sample correction to the variance nobs / (nobs - 1) is used.
Applies only if method='score'.
return_results : bool
If true, then a results instance with extra information is returned,
otherwise a tuple with statistic and pvalue is returned.
Returns
-------
results : results instance or tuple
If return_results is True, then a results instance with the
information in attributes is returned.
If return_results is False, then only ``statistic`` and ``pvalue``
are returned.
statistic : float
test statistic asymptotically normal distributed N(0, 1)
pvalue : float
p-value based on normal distribution
other attributes :
additional information about the hypothesis test
Notes
-----
Status: experimental, API and defaults might still change.
More ``methods`` will be added.
"""
method_default = {'diff': 'agresti-caffo',
'ratio': 'log-adjusted',
'odds-ratio': 'logit-adjusted'}
# normalize compare name
if compare.lower() == 'or':
compare = 'odds-ratio'
if method is None:
method = method_default[compare]
method = method.lower()
if method.startswith('agr'):
method = 'agresti-caffo'
if value is None:
# TODO: odds ratio does not work if value=1 for score test
value = 0 if compare == 'diff' else 1
p1 = count1 / nobs1
p2 = count2 / nobs2
diff = p1 - p2
ratio = p1 / p2
odds_ratio = p1 / (1 - p1) / p2 * (1 - p2)
res = None
if compare == 'diff':
if method in ['wald', 'agresti-caffo']:
addone = 1 if method == 'agresti-caffo' else 0
count1_, nobs1_ = count1 + addone, nobs1 + 2 * addone
count2_, nobs2_ = count2 + addone, nobs2 + 2 * addone
p1_ = count1_ / nobs1_
p2_ = count2_ / nobs2_
diff_stat = p1_ - p2_ - value
var = p1_ * (1 - p1_) / nobs1_ + p2_ * (1 - p2_) / nobs2_
statistic = diff_stat / np.sqrt(var)
distr = 'normal'
elif method.startswith('newcomb'):
msg = 'newcomb not available for hypothesis test'
raise NotImplementedError(msg)
elif method == 'score':
# Note score part is the same call for all compare
res = score_test_proportions_2indep(count1, nobs1, count2, nobs2,
value=value, compare=compare,
alternative=alternative,
correction=correction,
return_results=return_results)
if return_results is False:
statistic, pvalue = res[:2]
distr = 'normal'
# TODO/Note score_test_proportion_2samp returns statistic and
# not diff_stat
diff_stat = None
else:
raise ValueError('method not recognized')
elif compare == 'ratio':
if method in ['log', 'log-adjusted']:
addhalf = 0.5 if method == 'log-adjusted' else 0
count1_, nobs1_ = count1 + addhalf, nobs1 + addhalf
count2_, nobs2_ = count2 + addhalf, nobs2 + addhalf
p1_ = count1_ / nobs1_
p2_ = count2_ / nobs2_
ratio_ = p1_ / p2_
var = (1 / count1_) - 1 / nobs1_ + 1 / count2_ - 1 / nobs2_
diff_stat = np.log(ratio_) - np.log(value)
statistic = diff_stat / np.sqrt(var)
distr = 'normal'
elif method == 'score':
res = score_test_proportions_2indep(count1, nobs1, count2, nobs2,
value=value, compare=compare,
alternative=alternative,
correction=correction,
return_results=return_results)
if return_results is False:
statistic, pvalue = res[:2]
distr = 'normal'
diff_stat = None
else:
raise ValueError('method not recognized')
elif compare == "odds-ratio":
if method in ['logit', 'logit-adjusted', 'logit-smoothed']:
if method in ['logit-smoothed']:
adjusted = _shrink_prob(count1, nobs1, count2, nobs2,
shrink_factor=2, return_corr=False)[0]
count1_, nobs1_, count2_, nobs2_ = adjusted
else:
addhalf = 0.5 if method == 'logit-adjusted' else 0
count1_, nobs1_ = count1 + addhalf, nobs1 + 2 * addhalf
count2_, nobs2_ = count2 + addhalf, nobs2 + 2 * addhalf
p1_ = count1_ / nobs1_
p2_ = count2_ / nobs2_
odds_ratio_ = p1_ / (1 - p1_) / p2_ * (1 - p2_)
var = (1 / count1_ + 1 / (nobs1_ - count1_) +
1 / count2_ + 1 / (nobs2_ - count2_))
diff_stat = np.log(odds_ratio_) - np.log(value)
statistic = diff_stat / np.sqrt(var)
distr = 'normal'
elif method == 'score':
res = score_test_proportions_2indep(count1, nobs1, count2, nobs2,
value=value, compare=compare,
alternative=alternative,
correction=correction,
return_results=return_results)
if return_results is False:
statistic, pvalue = res[:2]
distr = 'normal'
diff_stat = None
else:
raise ValueError('method "%s" not recognized' % method)
else:
raise ValueError('compare "%s" not recognized' % compare)
if distr == 'normal' and diff_stat is not None:
statistic, pvalue = _zstat_generic2(diff_stat, np.sqrt(var),
alternative=alternative)
if return_results:
if res is None:
res = HolderTuple(statistic=statistic,
pvalue=pvalue,
compare=compare,
method=method,
diff=diff,
ratio=ratio,
odds_ratio=odds_ratio,
variance=var,
alternative=alternative,
value=value,
)
else:
# we already have a return result from score test
# add missing attributes
res.diff = diff
res.ratio = ratio
res.odds_ratio = odds_ratio
res.value = value
return res
else:
return statistic, pvalue
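# Illustrative usage sketch (added): two-sample test that the proportions are
# equal, using the default method for compare='diff' ('agresti-caffo').
# The counts below are made-up numbers for demonstration only.
def _example_test_proportions_2indep():
    res = test_proportions_2indep(30, 100, 20, 100, compare='diff')
    return res.statistic, res.pvalue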
def tost_proportions_2indep(count1, nobs1, count2, nobs2, low, upp,
method=None, compare='diff', correction=True,
return_results=True):
"""
Equivalence test based on two one-sided `test_proportions_2indep`
This assumes that we have two independent binomial samples.
The Null and alternative hypothesis for equivalence testing are
for compare = 'diff'
H0: prop1 - prop2 <= low or upp <= prop1 - prop2
H1: low < prop1 - prop2 < upp
for compare = 'ratio'
H0: prop1 / prop2 <= low or upp <= prop1 / prop2
H1: low < prop1 / prop2 < upp
for compare = 'odds-ratio'
H0: or <= low or upp <= or
H1: low < or < upp
where odds-ratio or = prop1 / (1 - prop1) / (prop2 / (1 - prop2))
Parameters
----------
count1, nobs1 :
count and sample size for first sample
count2, nobs2 :
count and sample size for the second sample
low, upp :
equivalence margin for diff, risk ratio or odds ratio
method : string
        method for computing the hypothesis test. If method is None, then a
default method is used. The default might change as more methods are
added.
diff:
- 'wald',
- 'agresti-caffo'
- 'score' if correction is True, then this uses the degrees of freedom
correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985.
ratio:
- 'log': wald test using log transformation
- 'log-adjusted': wald test using log transformation,
adds 0.5 to counts
- 'score' if correction is True, then this uses the degrees of freedom
correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985.
odds-ratio:
- 'logit': wald test using logit transformation
         - 'logit-adjusted': wald test using logit transformation,
adds 0.5 to counts
         - 'logit-smoothed': wald test using logit transformation, biases
cell counts towards independence by adding two observations in
total.
- 'score' if correction is True, then this uses the degrees of freedom
correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985
    compare : string in ['diff', 'ratio', 'odds-ratio']
If compare is `diff`, then the confidence interval is for
diff = p1 - p2.
If compare is `ratio`, then the confidence interval is for the
risk ratio defined by ratio = p1 / p2.
If compare is `odds-ratio`, then the confidence interval is for the
        odds-ratio defined by or = p1 / (1 - p1) / (p2 / (1 - p2)).
correction : bool
If correction is True (default), then the Miettinen and Nurminen
small sample correction to the variance nobs / (nobs - 1) is used.
Applies only if method='score'.
return_results : bool
If true, then a results instance with extra information is returned,
otherwise a tuple with statistic and pvalue is returned.
Returns
-------
pvalue : float
p-value is the max of the pvalues of the two one-sided tests
t1 : test results
results instance for one-sided hypothesis at the lower margin
    t2 : test results
results instance for one-sided hypothesis at the upper margin
Notes
-----
Status: experimental, API and defaults might still change.
"""
tt1 = test_proportions_2indep(count1, nobs1, count2, nobs2, value=low,
method=method, compare=compare,
alternative='larger',
correction=correction,
return_results=return_results)
tt2 = test_proportions_2indep(count1, nobs1, count2, nobs2, value=upp,
method=method, compare=compare,
alternative='smaller',
correction=correction,
return_results=return_results)
return np.maximum(tt1.pvalue, tt2.pvalue), tt1, tt2,
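# Illustrative usage sketch (added): equivalence (TOST) test that the risk
# difference lies within (-0.1, 0.1), using the score method.  The counts are
# made-up numbers for demonstration only.
def _example_tost_proportions_2indep():
    pvalue, t_low, t_upp = tost_proportions_2indep(25, 100, 23, 100,
                                                   low=-0.1, upp=0.1,
                                                   method='score',
                                                   compare='diff')
    return pvalue, t_low.pvalue, t_upp.pvalue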
def _std_2prop_power(diff, p2, ratio=1, alpha=0.05, value=0):
"""compute standard error under null and alternative for 2 proportions
helper function for power and sample size computation
"""
if value != 0:
msg = 'non-zero diff under null, value, is not yet implemented'
raise NotImplementedError(msg)
nobs_ratio = ratio
p1 = p2 + diff
# The following contains currently redundant variables that will
# be useful for different options for the null variance
p_pooled = (p1 + p2 * ratio) / (1 + ratio)
# probabilities for the variance for the null statistic
p1_vnull, p2_vnull = p_pooled, p_pooled
p2_alt = p2
p1_alt = p2_alt + diff
std_null = _std_diff_prop(p1_vnull, p2_vnull)
std_alt = _std_diff_prop(p1_alt, p2_alt, ratio=nobs_ratio)
return p_pooled, std_null, std_alt
def power_proportions_2indep(diff, prop2, nobs1, ratio=1, alpha=0.05,
value=0, alternative='two-sided',
return_results=True):
"""power for ztest that two independent proportions are equal
This assumes that the variance is based on the pooled proportion
under the null and the non-pooled variance under the alternative
Parameters
----------
diff : float
difference between proportion 1 and 2 under the alternative
prop2 : float
        proportion for the reference case, prop2; the proportion for the
        first case is computed using prop2 and diff as
        p1 = p2 + diff
nobs1 : float or int
number of observations in sample 1
ratio : float
sample size ratio, nobs2 = ratio * nobs1
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
value : float
currently only `value=0`, i.e. equality testing, is supported
ratio : float
        ratio of the number of observations in sample 2 relative to
        sample 1, nobs2 = ratio * nobs1. See the description of nobs1.
alternative : string, 'two-sided' (default), 'larger', 'smaller'
Alternative hypothesis whether the power is calculated for a
two-sided (default) or one sided test. The one-sided test can be
either 'larger', 'smaller'.
return_results : bool
If true, then a results instance with extra information is returned,
otherwise only the computed power is returned.
Returns
-------
results : results instance or float
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
other attributes in results instance include
p_pooled : pooled proportion, used for std_null
std_null : standard error of difference under the null hypothesis
(without sqrt(nobs))
std_alt : standard error of difference under the alternative
hypothesis (without sqrt(nobs))
"""
# TODO: avoid possible circular import, check if needed
from statsmodels.stats.power import normal_power_het
    p_pooled, std_null, std_alt = _std_2prop_power(diff, prop2, ratio=ratio,
                                                   alpha=alpha, value=value)
pow_ = normal_power_het(diff, nobs1, alpha, std_null=std_null,
std_alternative=std_alt,
alternative=alternative)
if return_results:
res = Holder(power=pow_,
p_pooled=p_pooled,
std_null=std_null,
std_alt=std_alt,
nobs1=nobs1,
nobs2=ratio * nobs1,
nobs_ratio=ratio,
alpha=alpha,
)
return res
else:
return pow_
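# Illustrative usage sketch (added): power of the two-sample z-test for
# detecting a difference of 0.1 over a baseline proportion of 0.2 with 200
# observations per group.  The numbers are assumptions for demonstration only.
def _example_power_proportions_2indep():
    res = power_proportions_2indep(diff=0.1, prop2=0.2, nobs1=200, ratio=1,
                                   alpha=0.05, alternative='two-sided')
    return res.power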
def samplesize_proportions_2indep_onetail(diff, prop2, power, ratio=1,
alpha=0.05, value=0,
alternative='two-sided'):
"""required sample size assuming normal distribution based on one tail
This uses an explicit computation for the sample size that is required
to achieve a given power corresponding to the appropriate tails of the
normal distribution. This ignores the far tail in a two-sided test
    which is negligible in the common case when alternative and null are
far apart.
"""
# TODO: avoid possible circular import, check if needed
from statsmodels.stats.power import normal_sample_size_one_tail
if alternative in ['two-sided', '2s']:
alpha = alpha / 2
_, std_null, std_alt = _std_2prop_power(diff, prop2, ratio=ratio,
alpha=alpha, value=value)
nobs = normal_sample_size_one_tail(diff, power, alpha, std_null=std_null,
std_alternative=std_alt)
return nobs
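# Illustrative usage sketch (added): sample size per group required for 80%
# power to detect a difference of 0.1 over a baseline of 0.2 (made-up numbers
# for demonstration only).
def _example_samplesize_proportions_2indep_onetail():
    return samplesize_proportions_2indep_onetail(diff=0.1, prop2=0.2,
                                                 power=0.8, ratio=1,
                                                 alpha=0.05,
                                                 alternative='two-sided')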
def score_confint_inversion(count1, nobs1, count2, nobs2, compare='diff',
alpha=0.05, correction=True):
"""Compute score confidence interval by inverting score test
"""
def func(v):
r = test_proportions_2indep(count1, nobs1, count2, nobs2,
value=v, compare=compare, method='score',
correction=correction,
alternative="two-sided")
return r.pvalue - alpha
rt0 = test_proportions_2indep(count1, nobs1, count2, nobs2,
value=0, compare=compare, method='score',
correction=correction,
alternative="two-sided")
# use default method to get starting values
# this will not work if score confint becomes default
# maybe use "wald" as alias that works for all compare statistics
use_method = {"diff": "wald", "ratio": "log", "odds-ratio": "logit"}
rci0 = confint_proportions_2indep(count1, nobs1, count2, nobs2,
method=use_method[compare],
compare=compare, alpha=alpha)
# Note diff might be negative
ub = rci0[1] + np.abs(rci0[1]) * 0.5
    lb = rci0[0] - np.abs(rci0[0]) * 0.5
import math
import numbers
import random
import numpy as np
import unittest
from unittest import TestCase
from util.stats import RunningStats
class TestRunningStats(TestCase):
def test_accumulative_diff(self):
rs = RunningStats()
rs.push(1987698.987)
self.assertEqual(0, rs.accumulative_diff())
rs.push(1987870.987)
self.assertEqual(172, rs.accumulative_diff())
rs.push(1987990.987)
self.assertEqual(292, rs.accumulative_diff())
rs.push(1988200.987)
self.assertEqual(502, rs.accumulative_diff())
rs.push(1988000.987)
self.assertEqual(302, rs.accumulative_diff())
rs = RunningStats()
[rs.push(n) for n in [1, 2, 4, 7, 0]]
self.assertEqual(np.diff([1, 2, 4, 7, 0]).sum(), rs.accumulative_diff())
rs = RunningStats()
        values = np.random.uniform(-100, 100, 100)
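        [rs.push(v) for v in values]
        # Assumed completion of this truncated test case: mirror the integer
        # example above, allowing for floating point rounding.
        self.assertAlmostEqual(np.diff(values).sum(), rs.accumulative_diff())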
import numpy as np
import json
from perceptron import Perceptron
from tester import getTestResults
import matplotlib.pyplot as plt
plt.rcdefaults()
import pandas as pd
def getPercentage(nbSuccess, nbTot):
return nbSuccess * 100 / nbTot
def getScore(expectedResults, actualResults):
score = 0
for expected, actual in zip(expectedResults, actualResults):
if expected == actual:
score += 1
return getPercentage(score, len(expectedResults))
def plotScores(perceptrons):
x = []
y = []
for perceptron in perceptrons:
x.append(perceptron.genre)
y.append(perceptron.score)
y, x = zip(*sorted(zip(y, x)))
y_pos = np.arange(len(x))
plt.bar(range(len(x)), y, align='center', color='#7ed6df')
plt.xticks(y_pos, x, fontsize=5, rotation=30)
plt.ylabel('Score (in %)', weight='bold', size='large')
plt.xlabel('Genre', weight='bold', size='large')
plt.title('Score prediction per genre')
plt.ylim(ymin=np.amin(y) - 5, ymax=100)
print("Average is " + str(np.average(y)))
plt.grid(True, which="both", linestyle='--')
plt.show()
def plotScoreDetails(perceptrons):
x = []
y0 = []
y1 = []
for perceptron in perceptrons:
x.append(perceptron.genre)
y0.append(perceptron.scoreWhenFalse)
y1.append(perceptron.scoreWhenTrue)
# create plot
fig, ax = plt.subplots()
index = np.arange(len(x))
bar_width = 0.35
opacity = 0.8
rects1 = plt.bar(index, y0, bar_width,
alpha=opacity,
color='b',
label='Score when false')
rects2 = plt.bar(index + bar_width, y1, bar_width,
alpha=opacity,
color='g',
label='Score when true')
plt.xlabel('Genres')
plt.ylabel('Scores %')
    plt.title('Score details for each group')
plt.xticks(index + bar_width, x, fontsize=5, rotation=30)
plt.legend()
plt.grid(True, which="both", linestyle='--')
print("Average when true " + str(np.average(y1)))
print("Average when false " + str(np.average(y0)))
plt.tight_layout()
plt.show()
nb_series = 10000
###### LOAD LAYER_ORDER #######
with open('layer_order_' + str(nb_series) + '.json', encoding="utf8") as layer_order:
layer_order = json.load(layer_order)
allWords = layer_order
###### LOAD PERCEPTRONS ######
with open('syn_weights_' + str(nb_series) + '.json', encoding="utf8") as json_file:
data = json.load(json_file)
perceptrons = []
for k in data:
genre = list(k.keys())[0]
perceptron = Perceptron(len(allWords))
perceptron.genre = genre
perceptron.nbSeries = nb_series
perceptron.weights = np.array(list(k.values())[0])
perceptrons.append(perceptron)
###### LOAD TEST_DATAS ######
with open('tests_1500_out_of_10000.json', encoding="utf8") as test_file:
test_datas = json.load(test_file)
results = getTestResults(perceptrons, test_datas)
print(results)
#perceptrons.sort(key=lambda x: x.nbSeries, reverse=True)
for perceptron in perceptrons:
exp = []
pred = []
for res in results:
currentExpected = results[res]['expected']
currentPredicted = results[res]['predicted']
exp.append(perceptron.genre in currentExpected)
pred.append(perceptron.genre in currentPredicted)
perceptron.score = getScore(np.array(exp), np.array(pred))
for perceptron in perceptrons:
exp = []
pred = []
exp1 = []
pred1 = []
exp0 = []
pred0 = []
nb = 0
for res in results:
currentExpected = results[res]['expected']
currentPredicted = results[res]['predicted']
exp.append(perceptron.genre in currentExpected)
pred.append(perceptron.genre in currentPredicted)
if perceptron.genre in currentExpected:
exp1.append(perceptron.genre in currentExpected)
pred1.append(perceptron.genre in currentPredicted)
nb += 1
else:
exp0.append(perceptron.genre in currentExpected)
pred0.append(perceptron.genre in currentPredicted)
print("nb for " + perceptron.genre + ": " + str(nb))
perceptron.score = getScore(np.array(exp), np.array(pred))
perceptron.scoreWhenTrue = getScore(np.array(exp1), np.array(pred1))
    perceptron.scoreWhenFalse = getScore(np.array(exp0), np.array(pred0))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: phil
"""
import numpy as np
import binascii
# from cStringIO import StringIO
from io import StringIO
import copy
from mpmath import mp
"""
Quantization vector is of the formed fixed(N, F). Where the first value indicates the
total number of bits and the second number indicates the location of the fractional point.
"""
__version__ = "1.1"
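# Added illustration of the fixed(N, F) convention described above: an N-bit
# integer word w represents the real value w * 2**-F.  The helper below is a
# small sketch (not part of the original library) showing the step size and
# signed upper bound for qvec = (16, 15).
def _example_qvec():
    qvec = (16, 15)
    lsb = 2. ** (-qvec[1])                          # smallest step, ~3.05e-5
    max_val = 2. ** (qvec[0] - 1 - qvec[1]) - lsb   # signed max, just under 1.0
    return lsb, max_val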
def bit_count(val):
"""
Fast way to count 1's in a 64 bit integer. Based on Hamming weight
"""
val = val - ((val >> 1) & 0x5555555555555555)
val = (val & 0x3333333333333333) + ((val >> 2) & 0x3333333333333333)
return (((val + (val >> 4)) & 0xF0F0F0F0F0F0F0F) * 0x101010101010101) >> 56
def r_shift(bin_str, new_val):
"""
Function performs a right shift of a binary string. Placing the new
value into the MSB position.
"""
offset = bin_str.find('b') + 1
new_val = str(new_val) + bin_str[offset:-1]
if (offset != -1):
new_val = '0b' + new_val
return new_val
def l_shift(bin_str, new_val):
"""
Function performs a left shift of a binary string. Placing the new
value into the LSB position.
"""
offset = bin_str.find('b') + 1
new_val = bin_str[offset + 1:] + str(new_val)
if (offset != -1):
new_val = '0b' + new_val
return new_val
def lappend(bin_str, str_append):
"""
Function left appends a binary string with string specified by
string append.
"""
offset_a = bin_str.find('b') + 1
offset_b = str_append.find('b') + 1
new_val = str_append[offset_b:] + bin_str[offset_a:]
if ((offset_a != -1) | (offset_b != -1)):
new_val = '0b' + new_val
return new_val
def lappend_udec(int_val, bit_val, num_bits):
"""
Function left appends int_val with bit_val. bit_val is assumed
to be one bit. num_bits is the number of bits to represent
unsigned integer int_val
"""
temp = np.floor(int_val / 2) + ((1 << (num_bits - 1)) * bit_val)
return temp.astype(np.int)
def collapse_byte(values):
"""
Function collapses a bit stream into unsigned integer representing bytes.
"""
temp = 0
byte_val = []
for i, val in enumerate(values):
idx = 7 - (i % 8)
temp += val << idx
if idx == 0:
byte_val.append(temp)
temp = 0
return byte_val
def uint_to_fp(vec, qvec=(16, 15), signed=0, overflow='wrap'):
max_int = int(comp_max_value(qvec, signed) * 2 ** qvec[1])
min_int = max_int + 1
vec_fp = []
for value in vec:
# value = float(value)
if value > max_int and signed == 1:
# negative value
value = -1 * (min_int - (value % min_int))
vec_fp.append(value * (2 ** -qvec[1]))
return ret_fi(vec_fp, qvec=qvec, overflow=overflow, signed=signed)
class range_fi(object):
def __init__(self, min_int, max_int, step):
self.max = max_int
self.min = min_int
self.step = step
class Fi(object):
def __init__(self, vec, qvec=(16, 15), overflow='wrap', signed=1):
"""
Simple fixed integer object to hold parameters related to a \
fixed point object.
"""
self.vec = vec
self.qvec = qvec
self.overflow = overflow
self.signed = signed
self.comp = False
if np.iscomplexobj(vec):
self.comp = True
@property
def bin(self):
"""
Converts vector to 2's complement binary values.
"""
num_chars = self.qvec[0]
if self.comp:
real_vals = [dec_to_bin(np.real(value).astype(np.int), num_chars) for value in self.vec]
imag_vals = [dec_to_bin(np.imag(value).astype(np.int), num_chars) for value in self.vec]
return [real_val + (",j" + imag_val) for (real_val, imag_val) in zip(real_vals, imag_vals)]
else:
return [dec_to_bin(value, num_chars) for value in self.vec]
@property
def udec(self):
"""
Returns unsigned decimal integer of the vector
"""
values = copy.deepcopy(self.vec)
# min_int = int(comp_min_value(self.qvec, 0) * 2 ** self.qvec[1])
max_int = int(comp_max_value(self.qvec, 0) * 2 ** self.qvec[1])
num_chars = self.qvec[0]
if self.comp:
real_vals = np.real(values)
neg_idx = (real_vals < 0)
real_vals[neg_idx] += (max_int + 1)
imag_vals = np.imag(values)
neg_idx = (imag_vals < 0)
imag_vals[neg_idx] += (max_int + 1)
return (real_vals + 1j * imag_vals)
else:
real_vals = np.real(values)
neg_idx = (real_vals < 0)
real_vals[neg_idx] += (max_int + 1)
return real_vals
@property
def hex(self):
"""
Converts vector to 2's complement hexadecimal values.
"""
num_chars = int(np.ceil(self.qvec[0] / 4.))
if self.comp:
real_vals = dec_to_hex(np.real(self.vec).astype(np.int), num_chars)
imag_vals = dec_to_hex(np.imag(self.vec).astype(np.int), num_chars)
return [real_val + (",j" + imag_val) for (real_val, imag_val) in zip(real_vals, imag_vals)]
else:
return dec_to_hex(self.vec, num_chars)
@property
def len(self):
return (len(self.vec))
# overriding built in len term.
def __len__(self):
return (len(self.vec))
# def __getslice__(self, lidx, ridx):
# """
# Overloaded getslice method.
# """
# self.vec = self.vec[lidx, ridx]
# # return self
# #
# def __getitem__(self, index)
@property
def float(self):
return (self.vec * 2. ** (-self.qvec[1]))
@property
def max_float(self):
return np.max(self.float)
@property
def max_udec(self):
return np.max(self.udec)
@property
def min_udec(self):
return np.min(self.udec)
@property
def min_float(self):
return np.min(self.float)
@property
def max(self):
return np.max(self.vec)
@property
def min(self):
return np.min(self.vec)
@property
def range(self):
min_int = comp_min_value(self.qvec, self.signed)
max_int = comp_max_value(self.qvec, self.signed)
step = comp_slope_value(self.qvec)
return range_fi(min_int, max_int, step)
def __getslice__(self, i, j):
return self.vec[i:j]
def gen_full_data(self):
range_obj = self.range
vec = np.arange(range_obj.min, range_obj.max, range_obj.step)
self.vec = (vec * (2 ** self.qvec[1])).astype(np.int)
def __repr__(self):
c_str = StringIO()
c_str.write(' qvec : {}\n'.format(self.qvec))
c_str.write('overflow : {}\n'.format(self.overflow))
c_str.write(' signed : {}\n'.format(self.signed))
# , self.__class__.__name__, self.block_name
c_str.seek(0)
return c_str.getvalue()
def coe_write(fi_obj, radix=16, file_name=None, filter_type=False):
"""
Function takes a fixed point vector as input and generates a Xilinx
    compatible .coe file for ROM/RAM initialization.
==========
Parameters
==========
* fi_obj : fixed integer object
Fixed Point object generated by fixed point toolbox.
* radix : int (16)
Radix used for formatting .coe file.
* file_name : str
File name used for outputting file to correct location
and name.
=======
Returns
=======
Correctly formatted .coe file for use by Xilinx coregenerator
modules.
"""
fi_vec = fi_obj.vec
signed = fi_obj.signed
word_length = fi_obj.qvec[0]
fraction_length = fi_obj.qvec[1]
assert(file_name is not None), 'User must specify File Name'
# find last forward slash
idx = str(file_name[::-1]).find('/')
if (idx == -1):
idx = 0
else:
idx = len(file_name) - 1 - idx
if (str(file_name).find('.', idx) == -1):
file_name = file_name + '.coe'
    str_val = 'Radix must be one of the following: 2, 8, 10, 16'
assert(radix == 16 or radix == 10 or radix == 8 or radix == 2), str_val
with open(file_name, 'w') as f:
f.write('; Initialization File : \n')
if signed:
f.write('; Signed Fixed Point\n')
else:
f.write('; Unsigned Fixed Point\n')
# skip = 2
f.write('; Word Length : %d\n' % word_length)
f.write('; Fraction Length : %d\n' % fraction_length)
f.write('; Number of Entries : %d\n\n' % len(fi_vec))
if (filter_type is False):
f.write('memory_initialization_radix = ' + str(radix) + ';\n')
f.write('memory_initialization_vector = ' + '\n')
else:
f.write('Radix = ' + str(radix) + ';\n')
f.write('Coefficient_Width = %d;\n' % word_length)
f.write('CoefData = \n')
mod_fac = (1 << word_length)
if radix == 16:
num_chars = int(np.ceil(word_length / 4.))
format_str = '0{}X'.format(num_chars)
elif radix == 8:
num_chars = int(np.ceil(word_length / 3.))
format_str = '0{}o'.format(num_chars)
elif radix == 2:
format_str = '0{}b'.format(word_length)
for (ii, val) in enumerate(fi_vec):
if radix == 16:
temp = (val + mod_fac) % mod_fac
temp = format(temp, format_str)
elif radix == 8:
temp = (val + mod_fac) % mod_fac
temp = format(temp, format_str)
elif radix == 10:
temp = str(val)
elif radix == 2:
temp = (val + mod_fac) % mod_fac
temp = format(temp, format_str)
f.write(temp)
if ii == (len(fi_vec) - 1):
f.write(';')
else:
f.write(',\n')
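# Illustrative usage sketch (added): write a tiny signed fixed(8, 7) table to
# a .coe file.  The file name is an assumption chosen for demonstration only.
def _example_coe_write():
    rom = Fi([0, 32, 64, -64], qvec=(8, 7), signed=1)
    coe_write(rom, radix=16, file_name='example_rom.coe')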
def comp_frac_width(value, word_width, signed=0):
"""
Function computes the optimal fractional width given the vector and the word_width
"""
shift_val = -1
temp_val = value
bit_shift = ret_num_bitsU(np.max(np.abs(temp_val)))
while bit_shift < 0:
temp_val = temp_val * 2
shift_val += 1
bit_shift = ret_num_bitsU(np.max(np.abs(temp_val)))
if (bit_shift >= shift_val):
shift_val = -bit_shift
frac_width = word_width - signed + shift_val
return frac_width
def comp_min_value(qvec, signed=0):
"""
    Computes the minimum real value given the fixed point representation.
"""
word_width = qvec[0]
frac_width = qvec[1]
min_val = -1 * 2.**(word_width - signed) / (2.**frac_width)
if signed == 0:
min_val = 0
return min_val
def comp_max_value(qvec, signed=0):
"""
Computes maximum real value given the fixed point representation, qvec.
"""
word_width = qvec[0]
frac_width = qvec[1]
max_val = 2.**(word_width - signed) / (2.**frac_width)
max_val -= 2.**(-frac_width)
return max_val
def comp_slope_value(qvec):
"""
Returns the fixed point increment per unit increase in binary number.
"""
frac_width = qvec[1]
return 2.**(-frac_width)
def comp_range_vec(qvec, signed=0):
"""
Computes range of real values for a given fixed point representation.
"""
min_val = comp_min_value(qvec, signed)
max_val = comp_max_value(qvec, signed)
slope = comp_slope_value(qvec)
return np.arange(min_val, max_val + slope, slope)
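# Illustrative example (added): a signed fixed(4, 2) format spans -2.0 to 1.75
# in steps of 0.25, i.e. 16 representable values.
def _example_comp_range_vec():
    return comp_range_vec((4, 2), signed=1)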
def hex_to_ascii(hex_val):
"""
Converts hex value to ascii string.
"""
offset = hex_val.find('x') + 1
return binascii.unhexlify(hex_val[offset:]) # .decode('hex')
def str_to_dec(str_val, base=2, signed_val=True):
"""
    Method converts a numerical string to its unsigned decimal representation.
    Can take a single value or vector; complex or real. Base 2 : binary,
    base 8 : octal, base 16 : hexadecimal.
"""
if (not isinstance(str_val, np.ndarray)):
val_int = np.atleast_1d(str_val)
else:
val_int = str_val.copy()
fl = val_int.flat
sub_idx = fl.coords
complex_vals = (val_int[sub_idx][-1] == 'j')
if complex_vals:
ret_vals = np.zeros(val_int.shape, dtype=np.complex)
else:
ret_vals = np.zeros(val_int.shape, dtype=int)
num_chars = len(val_int[sub_idx])
if complex_vals:
        num_chars = (len(str_val[sub_idx]) - 4) // 2
imag_lidx = num_chars + 3
imag_ridx = len(str_val[sub_idx]) - 1
if signed_val is False:
if complex_vals:
for [sub_idx, value] in np.ndenumerate(val_int):
                ret_vals[sub_idx] = np.int(value[0:num_chars], base)
from __future__ import absolute_import, division, unicode_literals
import os
import io
import numpy as np
import logging
import codecs
from scipy.stats import spearmanr, pearsonr
from enteval.utils import cosine
class WikiSRSEval(object):
def __init__(self, taskpath, use_name=False, seed=1111):
logging.debug('***** Transfer task : Entity Similarity and Relatedness *****\n\n')
self.seed = seed
self.use_name = use_name
self.relate_labels, self.relate_entity1, self.relate_entity2 = self.loadFile(os.path.join(taskpath, "WikiSRS_relatedness.csv.pro"))
self.sim_labels, self.sim_entity1, self.sim_entity2 = self.loadFile(os.path.join(taskpath, "WikiSRS_similarity.csv.pro"))
self.relate_labels = np.array(self.relate_labels)
self.sim_labels = np.array(self.sim_labels)
self.samples = self.sim_entity1 + self.sim_entity2 + self.relate_entity1 + self.relate_entity2
def loadFile(self, fpath):
labels, entities1, entities2 = [], [], []
with codecs.open(fpath, 'rb', 'utf-8') as f:
for line in f:
label, entity1, entity2, entity_desc1, entity_desc2 = line.strip().split("\t")
labels.append(float(label))
if self.use_name:
entities1.append(entity1.split())
entities2.append(entity2.split())
else:
entities1.append(entity_desc1.split())
entities2.append(entity_desc2.split())
return labels, entities1, entities2
def do_prepare(self, params, prepare):
if 'similarity' in params:
self.similarity = params.similarity
else: # Default similarity is cosine
self.similarity = lambda s1, s2: np.nan_to_num(cosine(np.nan_to_num(s1), np.nan_to_num(s2)))
return prepare(params, self.samples)
def calculate_similarity(self, params, batcher, ent1, ent2):
assert len(ent1) == len(ent2), "entity 1 and entity 2 must have the same length"
ent1 = [[None, None, None] + [item] for item in ent1]
ent2 = [[None, None, None] + [item] for item in ent2]
length = len(ent1)
ent1_enc = []
ent2_enc = []
for i in range(0, length, params.batch_size):
_, enc1 = batcher(params, ent1[i:i+params.batch_size])
_, enc2 = batcher(params, ent2[i:i+params.batch_size])
ent1_enc.append(enc1)
ent2_enc.append(enc2)
ent1_enc = np.vstack(ent1_enc)
        ent2_enc = np.vstack(ent2_enc)
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
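# Illustrative sketch (added, not used by the tests): how the helper above
# forward-fills knowledge through a date range.  Sids, dates and estimate
# values here are made up for demonstration only.
def _example_expected_df_for_factor_compute():
    tuples = [(0, 10., pd.Timestamp('2015-01-05')),
              (0, 11., pd.Timestamp('2015-01-07'))]
    return create_expected_df_for_factor_compute(
        start_date=pd.Timestamp('2015-01-05'),
        sids=[0, 1],
        tuples=tuples,
        end_date=pd.Timestamp('2015-01-09'),
    )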
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
    Tests
    ------
    test_load_one_day()
        Tests that a Pipeline requesting all columns for a single day returns
        the expected values.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous quarter loader returns the expected values for
    multiple columns on a single day.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next quarter loader returns the expected values for
    multiple columns on a single day.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to snapshots of how
        the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
            # We never get estimates for sid 10 for 2Q ago because once Q3
            # becomes our previous quarter, 2Q ago would be Q2, and we have
            # no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))
], pd.Timestamp('2015-01-14')),
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-01-21')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 221*.8*.9, pd.Timestamp('2015-02-10')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240.*13*14, pd.Timestamp('2015-02-10')),
(50, 250., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-19')] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11*12, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-02-09')] +
            # We never get estimates for sid 10 for 2Q ago because once Q3
            # becomes our previous quarter, 2Q ago would be Q2, and we have
            # no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-02-10')),
(30, 131*11*12, pd.Timestamp('2015-01-20')),
(40, 140. * 13 * 14, pd.Timestamp('2015-02-10')),
(50, 150., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithSplitAdjustedWindows(PreviousWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 130*1/10, cls.window_test_start_date),
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140, pd.Timestamp('2015-01-09')),
(50, 150.*1/15*1/16, pd.Timestamp('2015-01-09'))],
pd.Timestamp('2015-01-09')
),
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/15*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-12')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-13')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-14')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*5, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*.7, cls.window_test_start_date),
(20, 121*.7, pd.Timestamp('2015-01-07')),
(30, 230*11, cls.window_test_start_date),
(40, 240, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100*5*6, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110*.3, pd.Timestamp('2015-01-09')),
(10, 111*.3, pd.Timestamp('2015-01-12')),
(20, 120*.7*.8, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-07')),
(30, 230*11*12, cls.window_test_start_date),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240*13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-21')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-22')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8, cls.window_test_start_date),
(20, 221*.8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220*5/3, cls.window_test_start_date),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
(50, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-09')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*1/4, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*5/3, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-12')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-13', '2015-01-14')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*5, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*5*6, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8, cls.window_test_start_date),
(20, 221*.8, pd.Timestamp('2015-01-17')),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextWithSplitAdjustedWindows(NextWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class WithSplitAdjustedMultipleEstimateColumns(WithEstimates):
"""
ZiplineTestCase mixin for having multiple estimate columns that are
split-adjusted to make sure that adjustments are applied correctly.
Attributes
----------
test_start_date : pd.Timestamp
The start date of the test.
test_end_date : pd.Timestamp
        The end date of the test.
split_adjusted_asof : pd.Timestamp
The split-adjusted-asof-date of the data used in the test, to be used
to create all loaders of test classes that subclass this mixin.
Methods
-------
make_expected_timelines_1q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range for
each column. Only for 1 quarter out.
make_expected_timelines_2q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range. For 2
quarters out, so only for the column that is requested to be loaded
with 2 quarters out.
Tests
-----
test_adjustments_with_multiple_adjusted_columns
Tests that if you have multiple columns, we still split-adjust
correctly.
test_multiple_datasets_different_num_announcements
Tests that if you have multiple datasets that ask for a different
number of quarters out, and each asks for a different estimates column,
we still split-adjust correctly.
"""
END_DATE = pd.Timestamp('2015-02-10')
test_start_date = pd.Timestamp('2015-01-06', tz='utc')
test_end_date = pd.Timestamp('2015-01-12', tz='utc')
split_adjusted_asof = pd.Timestamp('2015-01-08')
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
sid_0_events = pd.DataFrame({
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp('2015-01-05'),
pd.Timestamp('2015-01-05')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
'estimate1': [1100., 1200.],
'estimate2': [2100., 2200.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# This is just an extra sid to make sure that we apply adjustments
# correctly for multiple columns when we have multiple sids.
sid_1_events = pd.DataFrame({
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp('2015-01-05'),
pd.Timestamp('2015-01-05')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-08'),
pd.Timestamp('2015-01-11')],
'estimate1': [1110., 1210.],
'estimate2': [2110., 2210.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 1,
})
return pd.concat([sid_0_events, sid_1_events])
@classmethod
def make_splits_data(cls):
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (.3, 3.),
'effective_date': (pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09')),
})
sid_1_splits = pd.DataFrame({
SID_FIELD_NAME: 1,
'ratio': (.4, 4.),
'effective_date': (pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09')),
})
return pd.concat([sid_0_splits, sid_1_splits])
@classmethod
def make_expected_timelines_1q_out(cls):
return {}
@classmethod
def make_expected_timelines_2q_out(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(
WithSplitAdjustedMultipleEstimateColumns, cls
).init_class_fixtures()
cls.timelines_1q_out = cls.make_expected_timelines_1q_out()
cls.timelines_2q_out = cls.make_expected_timelines_2q_out()
def test_adjustments_with_multiple_adjusted_columns(self):
dataset = MultipleColumnsQuartersEstimates(1)
timelines = self.timelines_1q_out
window_len = 3
class SomeFactor(CustomFactor):
inputs = [dataset.estimate1, dataset.estimate2]
window_length = window_len
def compute(self, today, assets, out, estimate1, estimate2):
assert_almost_equal(estimate1, timelines[today]['estimate1'])
assert_almost_equal(estimate2, timelines[today]['estimate2'])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=self.test_start_date,
# last event date we have
end_date=self.test_end_date,
)
def test_multiple_datasets_different_num_announcements(self):
dataset1 = MultipleColumnsQuartersEstimates(1)
dataset2 = MultipleColumnsQuartersEstimates(2)
timelines_1q_out = self.timelines_1q_out
timelines_2q_out = self.timelines_2q_out
window_len = 3
class SomeFactor1(CustomFactor):
inputs = [dataset1.estimate1]
window_length = window_len
def compute(self, today, assets, out, estimate1):
assert_almost_equal(
estimate1, timelines_1q_out[today]['estimate1']
)
class SomeFactor2(CustomFactor):
inputs = [dataset2.estimate2]
window_length = window_len
def compute(self, today, assets, out, estimate2):
assert_almost_equal(
estimate2, timelines_2q_out[today]['estimate2']
)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est1': SomeFactor1(), 'est2': SomeFactor2()}),
start_date=self.test_start_date,
# last event date we have
end_date=self.test_end_date,
)
class PreviousWithSplitAdjustedMultipleEstimateColumns(
WithSplitAdjustedMultipleEstimateColumns, ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate1', 'estimate2'],
split_adjusted_asof=cls.split_adjusted_asof,
)
@classmethod
def make_expected_timelines_1q_out(cls):
return {
pd.Timestamp('2015-01-06', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] * 3),
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-07', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] * 3),
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-08', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] * 2 +
[[np.NaN, 1110.]]),
'estimate2': np.array([[np.NaN, np.NaN]] * 2 +
[[np.NaN, 2110.]])
},
pd.Timestamp('2015-01-09', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] +
[[np.NaN, 1110. * 4]] +
[[1100 * 3., 1110. * 4]]),
'estimate2': np.array([[np.NaN, np.NaN]] +
[[np.NaN, 2110. * 4]] +
[[2100 * 3., 2110. * 4]])
},
pd.Timestamp('2015-01-12', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] * 2 +
[[1200 * 3., 1210. * 4]]),
'estimate2': np.array([[np.NaN, np.NaN]] * 2 +
[[2200 * 3., 2210. * 4]])
}
}
@classmethod
def make_expected_timelines_2q_out(cls):
return {
pd.Timestamp('2015-01-06', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-07', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-08', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-09', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp('2015-01-12', tz='utc'): {
'estimate2': np.array([[np.NaN, np.NaN]] * 2 +
[[2100 * 3., 2110. * 4]])
}
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithMultipleEstimateColumns(
PreviousWithSplitAdjustedMultipleEstimateColumns
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate1', 'estimate2'],
split_adjusted_asof=cls.split_adjusted_asof,
)
class NextWithSplitAdjustedMultipleEstimateColumns(
WithSplitAdjustedMultipleEstimateColumns, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate1', 'estimate2'],
split_adjusted_asof=cls.split_adjusted_asof,
)
@classmethod
def make_expected_timelines_1q_out(cls):
return {
pd.Timestamp('2015-01-06', tz='utc'): {
'estimate1': np.array([[np.NaN, np.NaN]] +
[[1100. * 1/.3, 1110. * 1/.4]] * 2),
'estimate2': np.array([[np.NaN, np.NaN]] +
[[2100. * 1/.3, 2110. * 1/.4]] * 2),
},
pd.Timestamp('2015-01-07', tz='utc'): {
'estimate1': np.array([[1100., 1110.]] * 3),
'estimate2': np.array([[2100., 2110.]] * 3)
},
pd.Timestamp('2015-01-08', tz='utc'): {
'estimate1': np.array([[1100., 1110.]] * 3),
'estimate2': np.array([[2100., 2110.]] * 3)
},
pd.Timestamp('2015-01-09', tz='utc'): {
'estimate1': np.array([[1100 * 3., 1210. * 4]] * 3),
                'estimate2': np.array([[2100 * 3., 2210. * 4]] * 3)
import tables
from matplotlib import patches
import matplotlib.mlab as ml
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import pickle as pickle
import os
from scipy import interpolate
import matplotlib.pyplot as plt
from PIL import Image
import astropy.io.fits as pyfits
from scipy.interpolate import griddata
import pyzdde.arraytrace as at
from collections import Counter
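# Descriptive note: px and py below form a regular 101 x 101 grid of normalized
# pupil coordinates over [-1, 1] x [-1, 1]; idx masks the samples that fall
# inside the unit circle, i.e. a circular pupil.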
px = []
py = []
for i in range(-50, 51, 1):
for j in range(-50, 51, 1):
px.append(i / 50.)
py.append(j / 50.)
px = np.array(px)
py = np.array(py)
idx = (px ** 2 + py ** 2) < 1
class Transformation(tables.IsDescription):
wavelength = tables.Float32Col()
shear = tables.Float32Col()
rotation = tables.Float32Col()
scale_x = tables.Float32Col()
scale_y = tables.Float32Col()
translation_x = tables.Float32Col()
translation_y = tables.Float32Col()
def save_CCD_info_to_hdf(path, ccd):
h5file = tables.open_file(path, "a")
ccd_group = h5file.create_group(h5file.root, 'CCD', 'CCD information')
ccd_group._v_attrs.Nx = ccd.Nx
ccd_group._v_attrs.Ny = ccd.Ny
ccd_group._v_attrs.pixelsize = ccd.pixelSize
h5file.close()
def save_spectrograph_info_to_hdf(path, spec):
h5file = tables.open_file(path, "w")
spec_group = h5file.create_group(h5file.root, 'Spectrograph', "Spectrograph Information")
spec_group._v_attrs.blaze = spec.blaze
spec_group._v_attrs.gpmm = spec.grmm
spec_group._v_attrs.name = spec.name
h5file.close()
def save_transformation_to_hdf(path, res, fiber_number=1):
h5file = tables.open_file(path, "a")
gr = h5file.create_group(h5file.root, "fiber_" + str(fiber_number))
gr._v_attrs.MatricesPerOrder = res['MatricesPerOrder']
gr._v_attrs.norm_field = res['norm_field']
gr._v_attrs.sampling_input_x = res['sampling_input_x']
    gr._v_attrs.field_width = res['field_width']
gr._v_attrs.field_height = res['field_height']
    for order, r in res['matrices'].items():
tab = h5file.create_table("/fiber_" + str(fiber_number), 'order' + str(abs(order)), Transformation,
"Affine Transformation", expectedrows=len(r), chunkshape=True)
transf = tab.row
        for wl, pars in r.items():
transf['wavelength'] = wl
transf['rotation'] = pars[0]
transf['scale_x'] = pars[1]
transf['scale_y'] = pars[2]
transf['shear'] = pars[3]
transf['translation_x'] = pars[4]
transf['translation_y'] = pars[5]
transf.append()
tab.flush()
h5file.close()
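# Illustrative sketch (not part of the original module): reading back the
# affine-transformation table written by save_transformation_to_hdf. The file
# path, fiber number and order used here are assumptions for the example.
def _example_read_transformation(path, fiber_number=1, order=100):
    """Return the affine-transformation records of one order as a numpy
    structured array (fields as defined in the Transformation description)."""
    h5file = tables.open_file(path, "r")
    try:
        tab = h5file.get_node("/fiber_" + str(fiber_number), "order" + str(abs(order)))
        data = tab.read()  # structured array: wavelength, shear, rotation, ...
    finally:
        h5file.close()
    return data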
def save_psfs_to_hdf(path, res, fiber_number=1):
h5file = tables.open_file(path, "a")
if not (h5file.__contains__("/fiber_" + str(fiber_number))):
gr = h5file.create_group(h5file.root, "fiber_" + str(fiber_number))
else:
gr = h5file.get_node(h5file.root, "fiber_" + str(fiber_number))
    for order, psfs in res.items():
if not (h5file.__contains__("/fiber_" + str(fiber_number) + "/psf_order_" + str(abs(order)))):
gr = h5file.create_group("/fiber_" + str(fiber_number), "psf_order_" + str(abs(order)))
else:
gr = h5file.get_node("/fiber_" + str(fiber_number), "psf_order_" + str(abs(order)))
        for wl, data in psfs.items():
if not (
h5file.__contains__(
"/fiber_" + str(fiber_number) + "/psf_order_" + str(order) + "/wavelength_" + str(wl))):
ar = h5file.create_array(gr, "wavelength_" + str(wl), np.array(data[1]))
ar.attrs.wavelength = float(wl)
ar.attrs.order = int(abs(order))
for i, a in enumerate(data[0]._fields):
ar.set_attr(a, data[0][i])
def efficiency(scalingfactor, order, alpha, blaze, wl, n):
bb = np.arcsin(-np.sin(alpha) + order * wl * n * 1E-6)
return scalingfactor * np.sinc(order * (np.cos(alpha) / np.cos(alpha - blaze)) *
(np.cos(blaze) - np.sin(blaze) / np.tan((alpha + bb) / 2.))) ** 2
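# Illustrative sketch (not part of the original module): evaluating the blaze
# efficiency envelope of a single echelle order with the function above. The
# grating parameters and wavelength range are assumptions chosen so the grating
# equation stays within the arcsin domain; angles are in radians, with
# wavelengths in nm and groove density in grooves/mm, which appears to be the
# unit convention behind the 1E-6 factor in `efficiency`.
def _example_blaze_envelope():
    alpha = np.deg2rad(76.)             # assumed incidence angle (near Littrow)
    blaze = np.deg2rad(76.)             # assumed blaze angle
    gpmm = 31.6                         # assumed groove density [grooves/mm]
    order = 100                         # assumed diffraction order
    wl = np.linspace(605., 620., 200)   # assumed wavelength grid [nm]
    return wl, efficiency(1., order, alpha, blaze, wl, gpmm)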
class Spot(object):
""" Class that describes a spot in a optical design
It basically consists of a DDEArray
"""
def __init__(self, wavelength, order, i, rd_in, rd_out, valid_only=True, circular_pupil=True):
"""
Constructor
:param wavelength: wavelength in microns
:param order: order of diffraction of the echelle grating
:param i: index of spot per order - makes it easier to create the spot_map but is probably redundant
:param rd_in: DdeArray of input rays before raytracing
:param rd_out: DdeArray of traced rays
        :param valid_only: if True, only rays that are not vignetted (and produced no trace error) are kept
        :param circular_pupil: if True, only rays within the unit circular pupil are kept (needed for spot diagrams)
        :return:
"""
self.wavelength = wavelength
self.order = order
self.i = i
# number of rays
self.Nrays = len(rd_in['z'][1:])
# restrict rays to circular pupil or not
if circular_pupil:
px = rd_in['z'][1:]
py = rd_in['l'][1:]
idx = (px ** 2 + py ** 2) <= 1.
else:
            idx = np.ones(self.Nrays, dtype=bool)
# restrict rays to non vignetted ones
if valid_only:
vig = rd_out['vigcode'][1:]
err = rd_out['error'][1:]
vig = np.logical_or(vig, err)
index = np.logical_and(vig < 1, idx)
else:
index = idx
self.hx = rd_in['x'][1:][index]
self.hy = rd_in['y'][1:][index]
self.x = rd_out['x'][1:][index]
self.y = rd_out['y'][1:][index]
self.px = rd_in['z'][1:][index]
self.py = rd_in['l'][1:][index]
self.barycenter = None
self.xy_c = None
self.rms = None
self.rms_x = None
self.rms_y = None
self._calc_barycenter()
self._calc_rms_radius()
def _calc_barycenter(self):
"""
calculate the barycenter of the spot
"""
self.barycenter = {'x': np.average(self.x),
'y': np.average(self.y)}
self.xy_c = {'x': self.x - self.barycenter['x'],
'y': self.y - self.barycenter['y']}
def _calc_rms_radius(self):
"""
calculate rms radius of the spot, radially, in x and y direction
"""
self.rms = np.std(np.sqrt(self.xy_c['x'] ** 2 + self.xy_c['y'] ** 2))
self.rms_x = np.std(np.sqrt(self.xy_c['x'] ** 2))
self.rms_y = np.std(np.sqrt(self.xy_c['y'] ** 2))
def EE_radius(self, EE=80., direction='r'):
"""
Calculate encircled energy (EE) radius of the spot
:param EE: encircled energy level in percent
:param direction: direction in which EE is calculated (radial, x and y)
:return:
"""
n = len(self.xy_c['x'])
if direction == 'r':
return np.sort(np.sqrt(self.xy_c['x'] ** 2 + self.xy_c['y'] ** 2))[int(EE / 100. * n)] * 1000.
if direction == 'x':
return np.sort(np.sqrt(self.xy_c['x'] ** 2))[int(EE / 100. * n)] * 1000.
if direction == 'y':
return np.sort(np.sqrt(self.xy_c['y'] ** 2))[int(EE / 100. * n)] * 1000.
def calc_weighted_barycenter(self, path_image=None, xy_c=None, radius=None, f=None, plot=False):
"""
Calculates the barycenter of the spot weighted with an image.
This can be used to calculate the spot barycenter weighted with a fiber far field (FF) illumination pattern.
:param path_image: path to image that contains the weights
:param xy_c: x and y coordinate of the center of the FF for interpolation, default is geometric image center
:param radius: radius on the FF image that corresponds to p=radius, default is half image width
:return: weighted barycenter
"""
        if isinstance(path_image, str):
            if path_image.lower().endswith('.fit') or path_image.lower().endswith('.fits'):
                # slice indices must be integers
                weight_image = pyfits.open(path_image)[0].data[
                               int(xy_c['y'] - np.ceil(radius)):int(xy_c['y'] + np.ceil(radius)),
                               int(xy_c['x'] - np.ceil(radius)):int(xy_c['x'] + np.ceil(radius))]
            else:
                if xy_c is None:
                    xy_c = {}
                    dims = np.shape(np.array(Image.open(path_image).convert('L')))
                    xy_c['y'] = dims[0] / 2.
                    xy_c['x'] = dims[1] / 2.
                if radius is None:
                    radius = np.shape(np.array(Image.open(path_image).convert('L')))[0] / 2.
                # open image but only select relevant parts; slice indices must be integers
                weight_image = np.array(Image.open(path_image).convert('L'))[
                               int(xy_c['y'] - np.ceil(radius)):int(xy_c['y'] + np.ceil(radius)),
                               int(xy_c['x'] - np.ceil(radius)):int(xy_c['x'] + np.ceil(radius))]
# normalized x and y coordinates (correspond to Px and Py in ZEMAX)
xr = yr = np.arange(-1., 1., 1. / radius)
# interpolation function
f = interpolate.RectBivariateSpline(xr, yr, weight_image)
w = f(self.px, self.py, grid=False)
weighted_barycenter = {'x': np.average(self.x, weights=w),
'y': np.average(self.y, weights=w)}
if plot:
plt.figure()
plt.scatter(self.px, self.py, c=w, linewidth=0., marker='o')
plt.show()
return weighted_barycenter
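# Typical use of Spot (illustrative; assumes a traced ray bundle rd_in/rd_out,
# e.g. from Echelle.trace below, and a hypothetical far-field image file
# 'far_field.png'):
#   spot = Spot(wavelength=0.6, order=100, i=0, rd_in=rd_in, rd_out=rd_out)
#   print(spot.rms, spot.EE_radius(EE=80., direction='r'))
#   bc = spot.calc_weighted_barycenter(path_image='far_field.png')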
class Order(object):
""" Class that describes an echelle order
"""
def __init__(self, m, blazeWL, minWL, maxWL, minFSRwl, maxFSRwl):
"""
Constructor
:param m: order number
:param blazeWL: blaze wavelength [micron]
:param minWL: minimum wavelength that fits on chip [micron]
:param maxWL: maximum wavelength that fits on chip [micron]
:param minFSRwl: minimum FSR wavelength [micron]
:param maxFSRwl: maximum FSR wavelength [micron]
:return: None
"""
self.m = m
self.blazeWL = blazeWL
self.minWL = minWL
self.maxWL = maxWL
self.minFSRwl = minFSRwl
self.maxFSRwl = maxFSRwl
def inFSR(self, wl):
"""
checks if wavelength lies within FSR or not
:param wl: wavelength [micron]
:return: True/False
"""
return self.maxFSRwl > wl > self.minFSRwl
def inOrder(self, wl):
"""
checks if wavelength lies in order (all chip) or not
:param wl: wavelength [micron]
:return: True/False
"""
return self.maxWL > wl > self.minWL
def info(self):
print('Order ', self.m)
print('FSR wavelength boundaries [microns]', self.minFSRwl, self.maxFSRwl)
print('Chip wavelength boundaries [microns]', self.minWL, self.maxWL)
class CCD(object):
""" CCD class, contains information about CCD detector """
def __init__(self, Nx, Ny, pixelSize, dispersionDirection='x', name=''):
"""
:param Nx: number of pixels in x - direction
:param Ny: number of pixels in y - direction
:param pixelSize: size of one pixel [micron]
:param dispersionDirection: echelle dispersion direction
:param name: name/identifier of the CCD detector
:return:
"""
self.Wx = Nx * pixelSize / 1000.
self.Wy = Ny * pixelSize / 1000.
self.Nx = Nx
self.Ny = Ny
self.pixelSize = pixelSize
self.name = name
self.xi = np.linspace(-Nx * pixelSize / 2000., Nx * pixelSize / 2000., Nx)
self.yi = np.linspace(-Ny * pixelSize / 2000., Ny * pixelSize / 2000., Ny)
self.extent = [-Nx * pixelSize / 2000.,
+Nx * pixelSize / 2000.,
-Ny * pixelSize / 2000.,
+Ny * pixelSize / 2000.]
self.shape = [[-Nx * pixelSize / 2000., -Ny * pixelSize / 2000.],
[Nx * pixelSize / 2000., -Ny * pixelSize / 2000.],
[Nx * pixelSize / 2000., Ny * pixelSize / 2000.],
[-Nx * pixelSize / 2000., Ny * pixelSize / 2000.]
]
self.dispersionDirection = dispersionDirection
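# Example (illustrative): a 4096 x 4096 detector with 15 micron pixels and the
# echelle dispersion running along x:
#   ccd = CCD(Nx=4096, Ny=4096, pixelSize=15., dispersionDirection='x', name='example CCD')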
class Echelle():
"""
class describing an echelle spectrograph
"""
def __init__(self, ln=None, name=''):
self.name = name
self.savePath = 'PyEchelle_' + self.name
if not os.path.exists(self.savePath):
os.makedirs(self.savePath)
# zemax surface number
# self.ln= pyz.createLink()
if ln is not None:
import pyzdde.zdde as pyz
import pyzdde.arraytrace as at # Module for array ray tracing
self.ln = ln
self.zmx_nsurf = None
# minimal/maximal order
self.minord = None
self.maxord = None
        # blaze angle in degrees
        self.blaze = None
        # gamma angle in degrees
        self.gamma = None
        # grooves per mm
self.grmm = None
# current order
self.order = None
self.theta = 0
self.grp = None
self.tracing = []
self.x = []
self.y = []
self.orders = []
self.file = None
self.rays = []
self.wls = []
self.CCD = None
self.Orders = {}
self.spots = []
self.order_configs = {}
self.order_config_wave = {}
def setCCD(self, CCD):
self.CCD = CCD
def saveOrders(self, filename='orders.pkl'):
"""
Save Orders to file
:param filename: filename
:return: None
"""
print('save orders')
pickle.dump(self.Orders, open(self.savePath + '/' + filename, "wb"))
def saveSpectrograph(self, filename=None):
if filename == None:
filename = self.name
spec = {'blaze': self.blaze,
'gamma': self.gamma,
'theta': self.theta,
'order': self.order,
'name': self.name,
'savePath': self.savePath,
'minOrder': self.minord,
'maxOrder': self.maxord,
'grmm': self.grmm,
'grp': self.grp,
}
pickle.dump(spec, open(self.savePath + '/' + filename + '.pkl', "wb"))
def loadSpectrograph(self, filename=None):
if filename == None:
filename = self.name
        spec = pickle.load(open(self.savePath + '/' + filename + '.pkl', 'rb'))
self.blaze = spec['blaze']
self.gamma = spec['gamma']
self.theta = spec['theta']
self.order = spec['order']
self.minord = spec['minOrder']
self.maxord = spec['maxOrder']
self.grmm = spec['grmm']
self.grp = spec['grp']
def loadOrders(self, filename='orders.pkl'):
"""
Load Orders from file
:param filename: filename
:return:
"""
        self.Orders = pickle.load(open(self.savePath + '/' + filename, 'rb'))
def analyseZemaxFile(self, echellename='Echelle', thetaname='theta', blazename='blaze', gammaname='gamma'):
"""
        Analyses a ZEMAX file and extracts the important parameters that specify an Echelle spectrograph.
Looks for names in comment column of ZEMAX to detect specific surfaces.
:param echellename: ZEMAX surface name of Echelle grating
:param thetaname: ZEMAX surface name of theta angle
:param blazename: ZEMAX surface name of blaze angle
:param gammaname: ZEMAX surface name of gamma angle
:return:
"""
for i in range(self.ln.zGetNumSurf()):
comm = self.ln.zGetComment(i)
if comm == echellename:
print('Echelle found ----------------------------')
self.zmx_nsurf = i
self.echelle_surface = i
# grooves per mm
self.grmm = self.ln.zGetSurfaceParameter(i, 1) * 1000.
# current order
self.order = int(self.ln.zGetSurfaceParameter(i, 2))
print('Grooves per mm', self.grmm)
print('Current order', self.order)
print('Surface number', self.zmx_nsurf)
elif comm == thetaname:
print('Theta found ------------------------------')
self.theta = float(self.ln.zGetSurfaceParameter(i, 4))
print('theta', self.theta)
elif comm == blazename:
print('blaze found ------------------------------')
b1 = abs(float(self.ln.zGetSurfaceParameter(i, 3)))
b2 = abs(float(self.ln.zGetSurfaceParameter(i, 4)))
b3 = abs(float(self.ln.zGetSurfaceParameter(i, 5)))
self.blaze = max((b1, b2, b3))
print('blaze', self.blaze)
elif comm == gammaname:
print('gamma found ------------------------------')
b1 = abs(float(self.ln.zGetSurfaceParameter(i, 3)))
b2 = abs(float(self.ln.zGetSurfaceParameter(i, 4)))
self.gamma = max((b1, b2))
print('gamma', self.gamma)
def trace(self, wave=1, hx=0, hy=0, N=101, intensity=1.):
self.ln.zGetUpdate()
self.ln.zPushLens()
Nx = Ny = int(np.sqrt(N))
rd_in = at.getRayDataArray(Nx * Ny, tType=0, mode=0)
rd_out = at.getRayDataArray(Nx * Ny, tType=0, mode=0)
k = 0
for i in np.linspace(-1., 1., Nx):
for j in np.linspace(-1., 1., Ny):
k += 1
rd_out[k].x = hx
rd_out[k].y = hy
rd_out[k].z = i # px
rd_out[k].l = j # py
rd_out[k].intensity = intensity
rd_out[k].wave = wave
rd_in[k].x = hx
rd_in[k].y = hy
rd_in[k].z = i # px
rd_in[k].l = j # py
rd_in[k].intensity = intensity
rd_in[k].wave = wave
ret = at.zArrayTrace(rd_out, timeout=5000)
return np.array(rd_in, dtype=at.DdeArrayData), np.array(rd_out, dtype=at.DdeArrayData)
def trace_rays(self, wave, FIELD):
self.ln.zGetUpdate()
self.ln.zPushLens()
numRays = 10201
rd = at.getRayDataArray(numRays, tType=0, mode=0)
# Fill the rest of the ray data array
k = 0
for i in range(-50, 51, 1):
for j in range(-50, 51, 1):
k += 1
rd[k].y = FIELD
rd[k].z = i / 50. # px
rd[k].l = j / 50. # py
rd[k].intensity = 1.0
rd[k].wave = wave
# Trace the rays
ret = at.zArrayTrace(rd, timeout=5000)
return rd
def order_to_config(self, order):
return self.order_configs[order]
def clear_configs(self):
c, cc, rc = self.ln.zGetConfig()
for i in range(cc):
self.ln.zDeleteConfig(1)
self.ln.zPushLens()
for i in range(rc):
self.ln.zDeleteMCO(1)
def clear_merit_function(self):
while (self.ln.zDeleteMFO(1) > 1):
self.ln.zDeleteMFO(1)
def set_config_and_wavelength(self, wavelength_per_order=7):
self.clear_configs()
self.ln.zSetMulticon(0, 1, 'PAR2', self.echelle_surface, 0, 0)
self.ln.zInsertMCO(2)
self.ln.zSetMulticon(0, 2, 'WAVE', 0, 0, 0)
self.order_configs = {}
for i, o in enumerate(self.Orders):
self.ln.zInsertConfig(i + 1)
self.ln.zSetMulticon(i + 1, 1, self.Orders[o].m, 0, 0, 0, 1, 0)
self.ln.zSetMulticon(i + 1, 2, self.Orders[o].blazeWL, 0, 0, 0, 1, 0)
self.order_configs[o] = i + 1
# self.ln.zInsertMFO(i + 1)
# self.ln.zSetOperandRow(i + 1, 'CONF', i+1)
c, cc, rc = self.ln.zGetConfig()
self.ln.zDeleteConfig(cc)
self.ln.zPushLens()
def clear_wavelength(self):
n = self.ln.zGetNumWave()
def set_config_and_wavelength_from_list(self, orders, wavelength, posx, posy):
self.clear_configs()
self.clear_merit_function()
self.ln.zSetMulticon(0, 1, 'PAR2', self.echelle_surface, 0, 0)
# add unique orders to multi config file
unique_orders = np.unique(np.array(orders))
self.order_configs = dict(zip(unique_orders, range(len(unique_orders))))
for i, o in enumerate(unique_orders):
self.ln.zInsertConfig(i + 1)
self.ln.zSetMulticon(i + 1, 1, o, 0, 0, 0, 1, 0)
self.order_configs[o] = i + 1
self.ln.zPushLens()
c, cc, rc = self.ln.zGetConfig()
self.ln.zDeleteConfig(cc)
self.ln.zPushLens()
# # add as many rows needed for the order with the most wavelength
n_wavelength = Counter(orders).most_common(1)[0][1]
self.ln.zSetWave(0, 1, n_wavelength)
self.ln.zGetUpdate()
#
for n in range(n_wavelength):
self.ln.zInsertMCO(n + 2)
self.ln.zSetPrimaryWave(n + 1)
self.ln.zSetMulticon(0, n + 2, 'WAVE', n + 1, n + 1, n + 1)
for i in unique_orders:
self.ln.zSetMulticon(self.order_to_config(i), n + 2, self.Orders[i].blazeWL, 0, 0, 0, 1, 0)
row_count = {}
for uo in unique_orders:
row_count[uo] = 2
for o, wl, px, py in zip(orders, wavelength, posx, posy):
config = self.order_to_config(o)
rc = row_count[o]
self.ln.zSetMulticon(config, rc, wl, 0, 0, 0, 1, 0)
self.set_merit_function(o, rc - 1, px, py)
row_count[o] += 1
self.ln.zPushLens()
def set_merit_function(self, order, wave, posx, posy, clear=False):
if clear:
self.clear_merit_function()
n = self.ln.zGetNumSurf()
cf = self.order_to_config(order)
self.ln.zInsertMFO(1)
self.ln.zSetOperandRow(1, 'REAY', n, wave, 0, 0, 0, 0, tgt=posy)
self.ln.zInsertMFO(1)
self.ln.zSetOperandRow(1, 'REAX', n, wave, 0, 0, 0, 0, tgt=posx)
self.ln.zInsertMFO(1)
self.ln.zSetOperandRow(1, 'CONF', cf)
self.ln.zPushLens()
def read_merit_function_position_difference(self, n):
REAX = []
REAY = []
dx = []
dy = []
for i in range(n):
data = self.ln.zGetOperandRow(i)
if data[0] == 'REAX':
dx.append((data[9] - data[11]) * data[10])
REAX.append(data[11])
if data[0] == 'REAY':
dy.append((data[9] - data[11]) * data[10])
REAY.append(data[11])
print("Median deviation XY: ", np.median(np.array(dx)) * 1000., np.median(np.array(dy)) * 1000.)
plt.figure()
plt.plot()
plt.axis('equal')
for x, y, dxx, dyy in zip(REAX, REAY, dx, dy):
plt.scatter(x, y)
plt.arrow(x, y, dxx * 100, dyy * 100)
plt.show()
def do_spectral_format(self, nPerOrder=7, FSRonly=True, hx=0, hy=0):
s = []
for o in list(self.Orders.values()):
print('Trace order', o.m)
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, o.m)
# self.ln.zPushLens()
if FSRonly:
wl = np.linspace(o.minFSRwl, o.maxFSRwl, nPerOrder)
else:
wl = np.linspace(o.minWL, o.maxWL, nPerOrder)
for i, w in enumerate(wl):
self.ln.zSetWave(1, w, 1.)
self.ln.zGetUpdate()
# self.ln.zPushLens()
rayTraceData = self.ln.zGetTrace(1, 0, -1, hx, hy, 0, 0)
error, vig, x, y, z, l, m, n, l2, m2, n2, intensity = rayTraceData
s.append([o.m, w, x, y])
return s
def get_psfs(self, nPerOrder=1, fieldnumber=3, fieldposition=[0., 0.]):
psfs = {}
old_field = self.ln.zGetField(fieldnumber)
# self.ln.zSetField(fieldnumber, fieldposition[0], fieldposition[1])
for o in list(self.Orders.values()):
print('Trace order', o.m)
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, o.m)
wl = np.linspace(o.minWL, o.maxWL, nPerOrder)
psfs[o.m] = {}
for w in wl:
self.ln.zSetWave(1, w, 1.)
psf = self.ln.zGetPSF(which='huygens')
print(psf)
psfs[o.m][w] = psf
# restore field
self.ln.zSetField(fieldnumber, old_field.xf, old_field.yf)
return psfs
def do_affine_transformation_calculation(self, nPerOrder=10,
norm_field=[[-1, 1], [-1, -1], [1, -1], [1, 1], [0, 0]], fw=None, fh=None):
"""
        Calculates the affine matrices that describe the spectrograph.
        The spectrograph can be described by affine transformations from the input slit to the focal plane.
        An affine transformation can be described by a 3x3 matrix, so this function calculates the 3x3 matrix
        per wavelength and order that maps the input slit onto the focal plane.
:param nPerOrder: number of wavelength steps across one order
:param norm_field: corner points and center point in normalized coordinates
:param fw: fiber/slit width [microns]
:param fh: fiber/slit height [microns]
:return:
"""
from skimage import transform as tf
        res = {'MatricesPerOrder': nPerOrder,
               'norm_field': norm_field}
# find field dimensions in ZEMAX
field_info = self.ln.zGetField(0)
# TODO: raise error
        if field_info[0] != 1:
print('Field coordinates have the wrong format')
zmx_fields = []
for ii in range(1, field_info[1] + 1):
field = self.ln.zGetField(ii)
zmx_fields.append([field[0], field[1]])
zmx_fields = np.array(zmx_fields)
norm_field = np.array(norm_field)
if fw is None:
fw = (np.max(zmx_fields[:, 0]) - np.min(zmx_fields[:, 0])) * 1000. # slit width in microns
fw *= (np.max(norm_field[:, 0]) - np.min(norm_field[:, 0])) / 2.
if fh is None:
fh = (np.max(zmx_fields[:, 1]) - np.min(zmx_fields[:, 1])) * 1000. # slit height in microns
fh *= (np.max(norm_field[:, 1]) - np.min(norm_field[:, 1]))
print('Field width: ' + str(fw))
print('Field height: ' + str(fh))
res['field_width'] = fw
res['field_height'] = fh
        # the slit sampling can only be fixed once the slit width is known
        sampling_input_x = int(fw)
        res['sampling_input_x'] = sampling_input_x
        sampling_x = sampling_input_x
        sampling_y = sampling_input_x * fh / fw
src = np.array(norm_field, dtype=float)
src[:, 0] -= np.min(src[:, 0])
src[:, 1] -= np.min(src[:, 1])
src[:, 0] /= np.max(src[:, 0])
src[:, 1] /= np.max(src[:, 1])
# src[:, 0] *= sampling_x
# src[:, 1] *= sampling_y
ppp = []
dst_x = []
dst_y = []
orders = []
wavelength = []
for o in list(self.Orders.values()):
print('trace order ' + str(o.m))
wl = np.linspace(o.minWL, o.maxWL, nPerOrder)
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, o.m) # TODO: replace with ln.zSetConfig(config_num)
# print(wl, o.m)
for w in wl:
self.ln.zSetWave(1, w, 1.)
self.ln.zGetUpdate()
for f in norm_field:
rayTraceData = self.ln.zGetTrace(1, 0, -1, f[0], f[1], 0, 0)
error, vig, x, y, z, l, m, n, l2, m2, n2, intensity = rayTraceData
dst_x.append(x)
dst_y.append(y)
orders.append(o.m)
wavelength.append(w)
# plt.figure()
# plt.scatter(dst_x, dst_y)
# plt.show()
# ppp.append(np.array(self.do_spectral_format(nPerOrder=nPerOrder, FSRonly=False, hx=f[0], hy=f[1])))
# ppp = np.array(ppp)
dst_x = np.array(dst_x)
dst_y = np.array(dst_y)
dst = np.vstack((dst_x, dst_y))
dst /= ((self.CCD.pixelSize) / 1000.)
dst += self.CCD.Nx / 2
        dst = dst.reshape(2, len(dst[0]) // len(norm_field), len(norm_field)).transpose((1, 2, 0))
orders = np.array(orders)
wavelength = np.array(wavelength)
        orders = orders.reshape((len(orders) // len(norm_field), len(norm_field)))
        wavelength = wavelength.reshape((len(wavelength) // len(norm_field), len(norm_field)))
affine_matrices = {}
transformations = {}
for order, wavel, p in zip(orders, wavelength, dst):
params = tf.estimate_transform('affine', src, p)
            if order[0] in affine_matrices:
affine_matrices[order[0]].update({wavel[0]: np.array(
[params.rotation, params.scale[0], params.scale[1], params.shear, params.translation[0],
params.translation[1]])})
else:
affine_matrices[order[0]] = {wavel[0]: np.array(
[params.rotation, params.scale[0], params.scale[1], params.shear, params.translation[0],
params.translation[1]])}
res['matrices'] = affine_matrices
return res
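    # Sketch of how the returned dictionary is meant to be consumed (`spec` and the
    # keyword values are illustrative): res['matrices'][order][wavelength] holds the
    # six affine parameters [rotation, scale_x, scale_y, shear, trans_x, trans_y]
    # that map the normalized slit corners onto detector pixels, e.g.
    #
    #   res = spec.do_affine_transformation_calculation(nPerOrder=10, fw=100, fh=100)
    #   first_order = sorted(res['matrices'].keys())[0]
    #   params_per_wavelength = res['matrices'][first_order]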
def walk_trough_configs(self, nWl=7, nPerSpot=5001, hx=0., hy=0.):
actC, nC, operandC = self.ln.zGetConfig()
for i in range(1, nC + 1):
for j in range(1, nWl + 1):
self.ln.zSetConfig(i)
wl = self.ln.zGetWave(j).wavelength
print(wl)
rd_in, rd_out = self.trace(j, hx=hx, hy=hy, N=nPerSpot)
o = self.ln.zGetSurfaceParameter(self.zmx_nsurf, 2)
self.spots.append(Spot(wl, o, i - 1, rd_in, rd_out))
def do_spots(self, nPerOrder=5, nOrders=5, FSRonly=True, nPerSpot=5001, hx=0, hy=0, everyNthOrder=5):
n = everyNthOrder
for o in list(self.Orders.values()):
if n < everyNthOrder:
n += 1
else:
print('Trace order', o.m)
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, o.m)
if FSRonly:
wl = np.linspace(o.minFSRwl, o.maxFSRwl, nPerOrder)
else:
wl = np.linspace(o.minWL, o.maxWL, nPerOrder)
for i, w in enumerate(wl):
self.ln.zSetWave(1, w, 1.)
rd_in, rd_out = self.trace(hx=hx, hy=hy, N=nPerSpot)
self.spots.append(Spot(w, o.m, i, rd_in, rd_out))
n -= everyNthOrder
def do_spot_diagrams(self, order='all', nPerOrder=5, field=0):
if order == 'all':
for o in self.tracing:
if o[0] <= self.maxord and o[0] >= self.minord:
print(("Trace order...", o[0]))
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, o[0])
wl = np.linspace(o[1], o[2], nPerOrder)
for i, w in enumerate(wl):
self.ln.zSetWave(1, w, 1.)
asdf = self.trace_rays(1, field)
a = np.array(asdf, dtype=at.DdeArrayData)
wl = self.ln.zGetWave(self.ln.zGetPrimaryWave()).wavelength
vig = a['vigcode'][1:]
err = a['error'][1:]
vig = np.logical_and(vig, err)
                        index = vig < 1  # keep rays that are neither vignetted nor flagged with an error
if np.max(index) > 0:
self.rays.append([a['x'][index], a['y'][index]])
self.wls.append(wl)
def saveSpots(self, filename='spots.pkl'):
print('save spots')
pickle.dump(self.spots, open(self.savePath + filename, "wb"))
def loadSpots(self, filename='spots.pkl'):
self.spots = pickle.load(open(self.savePath + filename))
def do_tracing(self, order='all', n=1000):
if order == 'all':
for o in self.tracing:
print(("Trace order...", o[0]))
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, o[0])
array = self.file.create_array(self.file.root, 'order' + str(o[0]), atom=np.array([3.]),
shape=(2 * 4 * n,))
wlarray = self.file.create_array(self.file.root, 'wl_order' + str(o[0]), atom=np.array([3.]),
shape=(n,))
wl = np.linspace(o[1], o[2], n)
for i, w in enumerate(wl):
self.ln.zSetWave(1, w, 1.)
xy = self.ln.zGetTrace(1, 0, -1, -1, -1, 0, 0)
array[i * 4 * 2] = xy[2]
array[i * 4 * 2 + 1] = xy[3]
xy = self.ln.zGetTrace(1, 0, -1, 1, -1, 0, 0)
array[i * 4 * 2 + 2] = xy[2]
array[i * 4 * 2 + 3] = xy[3]
xy = self.ln.zGetTrace(1, 0, -1, 1, 1, 0, 0)
array[i * 4 * 2 + 4] = xy[2]
array[i * 4 * 2 + 5] = xy[3]
xy = self.ln.zGetTrace(1, 0, -1, -1, 1, 0, 0)
array[i * 4 * 2 + 6] = xy[2]
array[i * 4 * 2 + 7] = xy[3]
wlarray[i] = w
self.file.flush()
self.file.close()
else:
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, self.tracing[0][0])
array = self.file.create_array(self.file.root, 'order' + str(self.tracing[0][0]), atom=np.array([3.]),
shape=(2 * 4 * n,))
wl = np.linspace(self.tracing[0][1], self.tracing[0][2], n)
for i, w in enumerate(wl):
self.ln.zSetWave(1, w, 1.)
xy = self.ln.zGetTrace(1, 0, -1, -1, -1, 0, 0)
array[i * 4 * 2] = xy[2]
array[i * 4 * 2 + 1] = xy[3]
xy = self.ln.zGetTrace(1, 0, -1, 1, -1, 0, 0)
array[i * 4 * 2 + 2] = xy[2]
array[i * 4 * 2 + 3] = xy[3]
xy = self.ln.zGetTrace(1, 0, -1, 1, 1, 0, 0)
array[i * 4 * 2 + 4] = xy[2]
array[i * 4 * 2 + 5] = xy[3]
xy = self.ln.zGetTrace(1, 0, -1, -1, 1, 0, 0)
array[i * 4 * 2 + 6] = xy[2]
array[i * 4 * 2 + 7] = xy[3]
self.file.close()
def setFile(self, name='MaroonXblue.h5', mode='w'):
self.file = tables.open_file(name, mode=mode)
def wavelength_to_order(self, wl):
"""
Returns the order in which the wavelength appears.
Returns empty list if wavelength is outside the spectral range.
Returns a list of tuples, with the order number and a string indicating whether it is within FSR or not.
:param wl: wavelength [micron]
:return: list of tuples (order number, 'FSR'/'CCD')
"""
res = []
for o in list(self.Orders.values()):
if o.inFSR(wl):
res.append((o.m, 'FSR'))
elif o.inOrder(wl):
res.append((o.m, 'CCD'))
return res
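    # Example (order numbers are purely illustrative): a wavelength near the blaze of
    # one order typically lies in that order's free spectral range and still falls on
    # the CCD of a neighbouring order:
    #
    #   spec.wavelength_to_order(0.55)   # e.g. [(92, 'FSR'), (93, 'CCD')]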
def calc_wl(self):
print('Calc wavelength')
def find_lmin(order, dwl=0.0001):
wl = self.ln.zGetWave(1)[0]
vig = False
wlmin = wl
while vig < 1:
wl = wl - dwl
self.ln.zSetWave(1, wl, 1.)
xy = self.ln.zGetTrace(1, 0, -1, 0, 0, 0, 0)
vig = np.logical_or(xy[1], xy[0])
else:
print('vignetting at surface ', xy[1], self.order, wl)
wlmin = wl
xmin = xy[2]
ymin = xy[3]
self.x.append(xmin)
self.y.append(ymin)
return wlmin, xmin, ymin
def find_lmax(order, dwl=0.0001):
wl = self.ln.zGetWave(1)[0]
vig = False
wlmin = wl
while vig < 1:
wl = wl + dwl
self.ln.zSetWave(1, wl, 1.)
xy = self.ln.zGetTrace(1, 0, -1, 0, 0, 0, 0)
vig = np.logical_or(xy[1], xy[0])
else:
print('vignetting at surface ', xy[1], self.order, wl)
wlmin = wl
xmin = xy[2]
ymin = xy[3]
self.x.append(xmin)
self.y.append(ymin)
return wlmin, xmin, ymin
gamma_rad = np.deg2rad(self.gamma)
blaze_rad = np.deg2rad(self.blaze)
theta_rad = np.deg2rad(self.theta)
self.grp = 1000. / self.grmm
alpha = blaze_rad + theta_rad
beta = blaze_rad - theta_rad
c0 = self.grp * np.cos(gamma_rad)
c1 = c0 * (np.sin(alpha) + np.sin(beta))
c2 = c0 * np.cos(beta)
        c3 = self.grp * np.cos(blaze_rad) * (1. - np.tan(theta_rad) * np.tan(blaze_rad))
print(self.order + 1, c1 / (self.order + 1))
self.ln.zSetWave(0, 1, 1)
self.ln.zPushLens()
vig = False
# find max order
o_working = self.order
print('find max order --------------------')
while vig < 1 and abs(self.order) < abs(self.maxord):
if self.order > 0:
self.order += 1
else:
self.order -= 1
blazeWL = abs(c1 / self.order)
print('Order: ', self.order, 'Blaze wl: ', blazeWL)
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, self.order)
self.ln.zSetWave(1, blazeWL, 1.)
self.ln.zGetUpdate()
self.ln.zPushLens()
xy = self.ln.zGetTrace(1, 0, -1, 0, 0, 0, 0)
vig = np.logical_or(xy[1], xy[0])
if vig < 1:
self.x.append(xy[2])
self.y.append(xy[3])
self.orders.append(self.order)
self.ln.zSetWave(1, blazeWL, 1.)
self.ln.zPushLens()
wmax = find_lmax(self.order)[0]
self.ln.zSetWave(1, blazeWL, 1.)
self.ln.zPushLens()
wmin = find_lmin(self.order)[0]
print("Order added ", self.order, wmin, wmax, blazeWL)
self.Orders[self.order] = Order(self.order, blazeWL, wmin, wmax,
blazeWL - blazeWL / self.order / 2.,
blazeWL + blazeWL / self.order / 2.)
# find min order
vig = False
self.order = o_working + 1
print('find min order')
while vig < 1 and abs(self.order) > abs(self.minord):
print('test order', self.order, self.minord)
if self.order > 0:
self.order -= 1
else:
self.order += 1
blazeWL = abs(c1 / self.order)
self.ln.zSetSurfaceParameter(self.zmx_nsurf, 2, self.order)
self.ln.zSetWave(1, blazeWL, 1.)
self.ln.zPushLens()
xy = self.ln.zGetTrace(1, 0, -1, 0, 0, 0, 0)
vig = np.logical_or(xy[1], xy[0])
if vig < 1:
print('ok')
self.orders.append(self.order)
self.x.append(xy[2])
self.y.append(xy[3])
self.ln.zSetWave(1, blazeWL, 1.)
self.ln.zPushLens()
wmax = find_lmax(self.order)[0]
self.ln.zSetWave(1, blazeWL, 1.)
self.ln.zPushLens()
wmin = find_lmin(self.order)[0]
self.Orders[self.order] = Order(self.order, blazeWL, wmin, wmax,
blazeWL - blazeWL / self.order / 2.,
blazeWL + blazeWL / self.order / 2.)
def spots_on_CCD(self):
plt.figure()
for s in self.spots:
plt.scatter(s.x, s.y)
plt.show()
def EE_map(self, direction='r', plotSpots=True, zoom=150, save='', vmax=15., vmin=0., hx=0, hy=0, showplot=False,
EE_ratio=80., additional_spots=[]):
"""
        Generates an encircled energy map from the traced spots.
        :param direction: 'r', 'x' or 'y'
        :param plotSpots: plot spot diagrams as an overlay
        :param zoom: zoom of the individual spot diagrams
:return:
"""
print('EE map')
fig, ax = plt.subplots()
X = []
Y = []
R = []
for s in self.spots:
if np.mean(s.hx) == hx:
if np.mean(s.hy) == hy:
X.append(s.barycenter['x'])
Y.append(s.barycenter['y'])
R.append(s.EE_radius(direction=direction, EE=EE_ratio))
if plotSpots:
if np.mean(s.hx) == hx:
if np.mean(s.hy) == hy:
ax.scatter(s.barycenter['x'] + zoom * s.xy_c['x'], -s.barycenter['y'] + zoom * s.xy_c['y'],
s=.2, facecolor='black', lw=0)
X = np.array(X)
Y = np.array(Y)
R = np.array(R)
xi = np.linspace(-self.CCD.Wx / 2., self.CCD.Wx / 2., 101)
yi = np.linspace(-self.CCD.Wy / 2., self.CCD.Wy / 2., 101)
zi = griddata((X, Y), R, (xi[None, :], yi[:, None]), method='linear')
ax.set_xlim((np.min(xi), np.max(xi)))
ax.set_ylim((np.min(yi), np.max(yi)))
ax.set_xlabel('Detector x [mm]')
ax.set_ylabel('Detector y [mm]')
        im = ax.imshow(zi, interpolation='nearest', extent=[np.min(xi), np.max(xi), np.min(yi), np.max(yi)], vmin=vmin, vmax=vmax)
import cv2
import random
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa
from preprocess.datagen import label_generator
def rotation(image, prob, keys):
""" Brightness, rotation, and scaling shear transformation """
aug = iaa.Sequential()
aug.add(iaa.Multiply(random.uniform(0.25, 1.5)))
aug.add(iaa.Affine(rotate=random.uniform(-180, 180),
scale=random.uniform(.7, 1.1),
shear=random.uniform(-25, 25),
cval=(0, 255)))
seq_det = aug.to_deterministic()
image_aug = seq_det.augment_images([image])[0]
keys = ia.KeypointsOnImage([ia.Keypoint(x=keys[0], y=keys[1]),
ia.Keypoint(x=keys[2], y=keys[3]),
ia.Keypoint(x=keys[4], y=keys[5]),
ia.Keypoint(x=keys[6], y=keys[7]),
ia.Keypoint(x=keys[8], y=keys[9])], shape=image.shape)
keys_aug = seq_det.augment_keypoints([keys])[0]
k = keys_aug.keypoints
output = [k[0].x, k[0].y, k[1].x, k[1].y, k[2].x, k[2].y, k[3].x, k[3].y, k[4].x, k[4].y]
index = 0
for i in range(0, len(prob)):
output[index] = output[index] * prob[i]
output[index + 1] = output[index + 1] * prob[i]
index = index + 2
    output = np.array(output)
import glob
import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as spst
from scipy.integrate import odeint
from hmc import summarize
from hmc.applications.fitzhugh_nagumo import fn_dynamics
from load_data import load_data
def euclidean_samples():
num_samples = [1000, 10000, 100000]
euclid = {}
for ns in num_samples:
d = {}
fns = sorted(glob.glob(os.path.join('samples', '*num-samples-{}-*euclidean*'.format(ns))))
for f in fns:
ss = f.split('-step-size-')[1].split('-')[0]
ss = float(ss)
with open(f, 'rb') as g:
d[ss] = pickle.load(g)
euclid[ns] = d
return euclid
def iid_samples():
iid = []
with open(os.path.join('data', 'samples.pkl'), 'rb') as f:
iid.append(pickle.load(f))
with open(os.path.join('data', 'samples-{}.pkl'.format(1)), 'rb') as f:
iid.append(pickle.load(f))
return iid
def riemannian_samples(newton_momentum=False, newton_position=False):
num_samples = [1000, 10000, 100000]
rmn = {}
for ns in num_samples:
d = {}
fns = sorted(glob.glob(os.path.join('samples', '*num-steps-6*-num-samples-{}-*riemannian*partial-momentum-0.0*-correct-True*newton-momentum-{}*newton-position-{}*'.format(ns, newton_momentum, newton_position))))
for f in fns:
t = f.split('-thresh-')[1].split('-m')[0]
t = float(t)
with open(f, 'rb') as g:
d[t] = pickle.load(g)
rmn[ns] = d
return rmn
def fitzhugh_nagumo():
euclid = euclidean_samples()[100000]
rmn = riemannian_samples()[100000]
y, time, sigma, state = load_data()
rkeys = sorted(rmn.keys(), reverse=False)
ekeys = sorted(euclid.keys(), reverse=False)
m = len(rkeys) + len(ekeys)
fig = plt.figure(figsize=(30, 5))
for i, t in enumerate(ekeys):
s = euclid[t]['samples']
yh = []
for j in range(0, len(s), 100):
params = tuple(s[j])
yh.append(odeint(fn_dynamics, state, time, params))
yh = np.array(yh)
ax = fig.add_subplot(1, m, i+1)
ax.plot(time, yh[..., 0].T, '-', color='tab:blue', alpha=0.1)
ax.plot(time, yh[..., 1].T, '-', color='tab:orange', alpha=0.1)
ax.plot(time, y[..., 0], '.', color='tab:blue', markersize=2)
ax.plot(time, y[..., 1], '.', color='tab:orange', markersize=2)
ax.set_ylim((-3, 3))
ax.set_title('Euclid. {:.0e}'.format(t), fontsize=35)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
for i, t in enumerate(rkeys):
s = rmn[t]['samples']
yh = []
for j in range(0, len(s), 100):
params = tuple(s[j])
yh.append(odeint(fn_dynamics, state, time, params))
yh = np.array(yh)
ax = fig.add_subplot(1, m, i+len(ekeys)+1)
ax.plot(time, yh[..., 0].T, '-', color='tab:blue', alpha=0.1)
ax.plot(time, yh[..., 1].T, '-', color='tab:orange', alpha=0.1)
ax.plot(time, y[..., 0], '.', color='tab:blue', markersize=2)
ax.plot(time, y[..., 1], '.', color='tab:orange', markersize=2)
ax.set_ylim((-3, 3))
ax.set_title('Thresh. {:.0e}'.format(t), fontsize=35)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
fig.tight_layout()
fig.savefig(os.path.join('images', 'fitzhugh-nagumo.png'))
def effective_sample_size():
euclid = euclidean_samples()[100000]
rmn = riemannian_samples()[100000]
ekeys = sorted(euclid.keys(), reverse=False)
rkeys = sorted(rmn.keys(), reverse=False)
labels = ['Euclidean {}'.format(t) for t in ekeys] + ['Threshold {:.0e}'.format(t) for t in rkeys]
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(111)
num_breaks = 20
ess = {}
for t in ekeys:
breaks = np.split(euclid[t]['samples'], num_breaks, axis=0)
k = 'euclid-{}'.format(t)
ess[k] = []
for i, b in enumerate(breaks):
metrics = summarize(b)
m = metrics['ess'].min()
ess[k].append(m)
ax.violinplot([ess[k] for k in ess.keys()], showmeans=True, showmedians=True, showextrema=False)
ess = {}
for t in rkeys:
breaks = np.split(rmn[t]['samples'], num_breaks, axis=0)
k = 'rmn-{}'.format(t)
ess[k] = []
for i, b in enumerate(breaks):
metrics = summarize(b)
m = metrics['ess'].min()
ess[k].append(m)
vpb = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 2, showmeans=True, showmedians=True, showextrema=False)
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(['' for l in labels])
ax.set_xticklabels(labels)
ax.set_xlim(0.25, len(labels) + 0.75)
for tick in ax.get_xticklabels():
tick.set_rotation(90)
ax.axvline(len(ekeys) + 0.5, color='black', linestyle='--')
ax.set_xlabel('')
ax.set_ylabel('Min. ESS', fontsize=16)
ax.tick_params(axis='y', labelsize=16)
ax.tick_params(axis='x', labelsize=16)
ax.grid(linestyle=':')
fig.tight_layout()
fig.savefig(os.path.join('images', 'minimum-ess.pdf'))
def effective_sample_size_per_second():
euclid = euclidean_samples()[100000]
rmn = riemannian_samples()[100000]
nm_rmn = riemannian_samples(True)[100000]
nb_rmn = riemannian_samples(True, True)[100000]
ekeys = sorted(euclid.keys(), reverse=False)
rkeys = sorted(rmn.keys(), reverse=False)
labels = ['Euclid. {}'.format(t) for t in ekeys] + ['Thresh. {:.0e}'.format(t) for t in rkeys]
for vidx in range(1, 4):
labels = ['Euclid. {}'.format(t) for t in ekeys] + ['Thresh. {:.0e}'.format(t) for t in rkeys]
fig = plt.figure()
ax = fig.add_subplot(111)
num_breaks = 20
ess = {}
for t in ekeys:
breaks = np.split(euclid[t]['samples'][:, [-vidx]], num_breaks, axis=0)
k = 'euclid-{}'.format(t)
ess[k] = []
for i, b in enumerate(breaks):
metrics = summarize(b)
m = metrics['ess'].min() / (euclid[t]['time'] / num_breaks)
ess[k].append(m)
ax.violinplot([ess[k] for k in ess.keys()], showmeans=True, showmedians=True, showextrema=False)
ess = {}
for t in rkeys:
breaks = np.split(rmn[t]['samples'][:, [-vidx]], num_breaks, axis=0)
k = 'rmn-{}'.format(t)
ess[k] = []
for i, b in enumerate(breaks):
metrics = summarize(b)
m = metrics['ess'].min() / (rmn[t]['time'] / num_breaks)
ess[k].append(m)
vpb = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 2, showmeans=True, showmedians=True, showextrema=False)
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(['' for l in labels])
ax.set_xticklabels(labels)
ax.set_xlim(0.25, len(labels) + 0.75)
for tick in ax.get_xticklabels():
tick.set_rotation(90)
ax.axvline(len(ekeys) + 0.5, color='black', linestyle='--')
ax.set_xlabel('')
ax.set_ylabel('ESS / Sec.', fontsize=20)
ax.tick_params(axis='x', labelsize=20)
ax.tick_params(axis='y', labelsize=20)
ax.grid(linestyle=':')
fig.tight_layout()
fig.savefig(os.path.join('images', 'minimum-ess-per-second-{}.pdf'.format(vidx)))
labels = ['Thresh. {:.0e}'.format(t) for t in rkeys]
fig = plt.figure()
ax = fig.add_subplot(111)
num_breaks = 20
ess = {}
for t in rkeys:
breaks = np.split(rmn[t]['samples'][:, [-vidx]], num_breaks, axis=0)
k = 'rmn-{}'.format(t)
ess[k] = []
for i, b in enumerate(breaks):
metrics = summarize(b)
m = metrics['ess'].min() / (rmn[t]['time'] / num_breaks)
ess[k].append(m)
vpb = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 1, showmeans=True, showmedians=True, showextrema=False)
ess = {}
for t in rkeys:
breaks = np.split(nm_rmn[t]['samples'][:, [-vidx]], num_breaks, axis=0)
k = 'rmn-{}'.format(t)
ess[k] = []
for i, b in enumerate(breaks):
metrics = summarize(b)
m = metrics['ess'].min() / (nm_rmn[t]['time'] / num_breaks)
ess[k].append(m)
vpc = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 1, showmeans=True, showmedians=True, showextrema=False)
ess = {}
for t in rkeys:
breaks = np.split(nb_rmn[t]['samples'][:, [-vidx]], num_breaks, axis=0)
k = 'rmn-{}'.format(t)
ess[k] = []
for i, b in enumerate(breaks):
metrics = summarize(b)
m = metrics['ess'].min() / (nb_rmn[t]['time'] / num_breaks)
ess[k].append(m)
vpd = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 1, showmeans=True, showmedians=True, showextrema=False)
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(['' for l in labels])
ax.set_xticklabels(labels)
ax.set_xlim(0.25, len(labels) + 0.75)
for tick in ax.get_xticklabels():
tick.set_rotation(90)
ax.set_xlabel('')
ax.set_ylabel('ESS / Sec.', fontsize=20)
ax.tick_params(axis='x', labelsize=20)
ax.tick_params(axis='y', labelsize=20)
ax.grid(linestyle=':')
if vidx == 1:
ax.legend([vpb["bodies"][0], vpc["bodies"][0], vpd["bodies"][0]], [r'Fixed Point', r'Newton (Mom.)', r'Newton (Mom. and Pos.)'], fontsize=16, loc='upper left')
fig.tight_layout()
fig.savefig(os.path.join('images', 'minimum-ess-per-second-vs-newton-{}.pdf'.format(vidx)))
def kolmogorov_smirnov():
euclid = euclidean_samples()[100000]
rmn = riemannian_samples()[100000]
nm_rmn = riemannian_samples(True)[100000]
nb_rmn = riemannian_samples(True, True)[100000]
iid = iid_samples()
num_iid_ks = 100
iid_ks = np.zeros(num_iid_ks)
x, y = iid[0], iid[1]
for i in range(num_iid_ks):
u = np.random.normal(size=x.shape[-1])
u = u / np.linalg.norm(u)
iid_ks[i] = spst.ks_2samp(x@u, y@u).statistic
print(iid_ks)
summarize(x)
summarize(y)
summarize(rmn[1e-8]['samples'])
print(list(rmn.keys()))
ekeys = sorted(euclid.keys(), reverse=False)
rkeys = sorted(rmn.keys(), reverse=False)
labels = ['I.I.D.'] + ['Euclid. {}'.format(t) for t in ekeys] + ['Thresh. {:.0e}'.format(t) for t in rkeys]
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(111)
ax.violinplot([np.log10(iid_ks)], showmeans=True, showmedians=True, showextrema=False)
ess = {}
for t in ekeys:
k = 'euclid-{}'.format(t)
ess[k] = np.log10(euclid[t]['ks'])
vpa = ax.violinplot([ess[k] for k in ess.keys()], positions=np.array([2.0]), showmeans=True, showmedians=True, showextrema=False)
ess = {}
for t in rkeys:
k = 'rmn-{}'.format(t)
ess[k] = np.log10(rmn[t]['ks'])
vpb = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 3, showmeans=True, showmedians=True, showextrema=False)
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(['' for l in labels])
ax.set_xticklabels(labels, rotation=90, ha='right', fontsize=16)
ax.set_xlim(0.25, len(labels) + 0.75)
ax.axvline(len(ekeys) + 1.5, color='black', linestyle='--')
ax.set_xlabel('')
ax.set_ylabel('KS Statistic', fontsize=16)
ax.tick_params(axis='y', labelsize=16)
ax.grid(linestyle=':')
fig.tight_layout()
fig.savefig(os.path.join('images', 'kolmogorov-smirnov.pdf'))
labels = ['Thresh. {:.0e}'.format(t) for t in rkeys]
fig = plt.figure()
ax = fig.add_subplot(111)
ess = {}
for t in rkeys:
k = 'rmn-{}'.format(t)
ess[k] = np.log10(rmn[t]['ks'])
vpb = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 1, showmeans=True, showmedians=True, showextrema=False)
ess = {}
for t in rkeys:
k = 'rmn-{}'.format(t)
ess[k] = np.log10(nm_rmn[t]['ks'])
vpc = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 1, showmeans=True, showmedians=True, showextrema=False)
ess = {}
for t in rkeys:
k = 'rmn-{}'.format(t)
ess[k] = np.log10(nb_rmn[t]['ks'])
vpd = ax.violinplot([ess[k] for k in ess.keys()], positions=np.arange(len(rkeys)) + 1, showmeans=True, showmedians=True, showextrema=False)
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(['' for l in labels])
ax.set_xticklabels(labels, rotation=90, ha='right', fontsize=24)
ax.set_xlim(0.25, len(labels) + 0.75)
ax.set_xlabel('')
ax.set_ylabel('KS Statistic', fontsize=30)
ax.tick_params(axis='y', labelsize=24)
ax.grid(linestyle=':')
fig.tight_layout()
fig.savefig(os.path.join('images', 'kolmogorov-smirnov-vs-newton.pdf'))
def mmd():
euclid = euclidean_samples()[100000]
rmn = riemannian_samples()[100000]
ekeys = sorted(euclid.keys(), reverse=False)
rkeys = sorted(rmn.keys(), reverse=False)
num_thresholds = len(rkeys)
thresholds = np.array(rkeys)
emmd = np.log10(np.abs(np.array([euclid[k]['mmd'] for k in ekeys])))
rmmd = np.log10(np.abs(np.array([rmn[k]['mmd'] for k in rkeys])))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(rmmd, '.-')
for v in emmd:
ax.axhline(v, color='k')
ax.xaxis.set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(0, num_thresholds))
ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
ax.tick_params(axis='y', labelsize=24)
ax.grid(linestyle=':')
ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
ax.set_ylabel(r'$\log_{10}|\mathrm{MMD}^2|$ Estimate', fontsize=30)
fig.tight_layout()
fig.savefig(os.path.join('images', 'mmd.pdf'))
def wasserstein_sliced():
euclid = euclidean_samples()[100000]
rmn = riemannian_samples()[100000]
ekeys = sorted(euclid.keys(), reverse=False)
rkeys = sorted(rmn.keys(), reverse=False)
num_thresholds = len(rkeys)
thresholds = np.array(rkeys)
esw = np.log10(np.abs(np.array([euclid[k]['sw'] for k in ekeys])))
rsw = np.log10(np.abs(np.array([rmn[k]['sw'] for k in rkeys])))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(rsw, '.-')
for v in esw:
ax.axhline(v, color='k')
ax.xaxis.set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(0, num_thresholds))
ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
ax.tick_params(axis='y', labelsize=24)
ax.grid(linestyle=':')
ax.set_xlabel(r'$\log_{10}$ Threshold', fontsize=30)
ax.set_ylabel(r'$\log_{10}$ Sliced Wasserstein', fontsize=30)
fig.tight_layout()
fig.savefig(os.path.join('images', 'sw.pdf'))
def volume_preservation():
euclid = euclidean_samples()
rmn = riemannian_samples()
num_thresholds = 9
thresholds = np.logspace(-num_thresholds, -1, num_thresholds)
dat = [rmn[100000][t]['jacdet'][1e-5] for t in thresholds]
dat = [_[~np.isnan(_)] for _ in dat]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.boxplot(dat, notch=True)
ax.grid(linestyle=':')
ax.xaxis.set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(1, num_thresholds + 1))
ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
ax.tick_params(axis='y', labelsize=24)
ax.set_xlim(0.25, len(thresholds) + 0.75)
ax.set_xlabel('$\log_{10}$ Threshold', fontsize=30)
ax.set_ylabel('$\log_{10}$ Vol. Pres. Err.', fontsize=30)
fig.tight_layout()
fig.savefig(os.path.join('images', 'jacobian-determinant.pdf'))
fig = plt.figure()
ax = fig.add_subplot(111)
bp = ax.boxplot(dat, notch=True, patch_artist=True)
for patch in bp['boxes']:
patch.set(facecolor='tab:blue')
nm_rmn = riemannian_samples(True)
dat = [nm_rmn[100000][t]['jacdet'][1e-5] for t in thresholds]
dat = [_[~np.isnan(_)] for _ in dat]
nm_bp = ax.boxplot(dat, notch=True, patch_artist=True)
for patch in nm_bp['boxes']:
patch.set(facecolor='tab:red')
nb_rmn = riemannian_samples(True, True)
dat = [nb_rmn[100000][t]['jacdet'][1e-5] for t in thresholds]
dat = [_[~np.isnan(_)] for _ in dat]
nb_bp = ax.boxplot(dat, notch=True, patch_artist=True)
for patch in nb_bp['boxes']:
patch.set(facecolor='tab:green')
ax.grid(linestyle=':')
ax.xaxis.set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(1, num_thresholds + 1))
ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
ax.tick_params(axis='y', labelsize=24)
ax.set_xlim(0.25, len(thresholds) + 0.75)
ax.set_xlabel('$\log_{10}$ Threshold', fontsize=30)
ax.set_ylabel('$\log_{10}$ Vol. Pres. Err.', fontsize=30)
fig.tight_layout()
fig.savefig(os.path.join('images', 'jacobian-determinant-vs-newton.pdf'))
perturb = sorted(rmn[100000][1e-9]['jacdet'].keys())
num_perturb = len(perturb)
dat = [rmn[100000][1e-9]['jacdet'][p] for p in perturb]
dat = [_[~np.isnan(_)] for _ in dat]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.boxplot(dat, notch=True)
ax.grid(linestyle=':')
ax.xaxis.set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(1, num_perturb + 1))
ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in perturb], fontsize=24)
ax.tick_params(axis='y', labelsize=24)
ax.set_xlim(0.25, num_perturb + 0.75)
ax.set_xlabel('$\log_{10}$ Perturbation', fontsize=30)
ax.set_ylabel('$\log_{10}$ Volume Preservation Error', fontsize=20)
fig.tight_layout()
fig.savefig(os.path.join('images', 'perturbation.pdf'))
def reversibility():
euclid = euclidean_samples()
rmn = riemannian_samples()
num_thresholds = 9
thresholds = np.logspace(-num_thresholds, -1, num_thresholds)
dat = [rmn[100000][t]['absrev'] for t in thresholds]
dat = [_[~np.isnan(_)] for _ in dat]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.boxplot(dat, notch=True)
ax.grid(linestyle=':')
ax.xaxis.set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(1, num_thresholds + 1))
ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
ax.tick_params(axis='y', labelsize=24)
ax.set_xlim(0.25, len(thresholds) + 0.75)
ax.set_xlabel('$\log_{10}$ Threshold', fontsize=30)
ax.set_ylabel('$\log_{10}$ Abs. Rev. Error', fontsize=30)
fig.tight_layout()
fig.savefig(os.path.join('images', 'absolute-reversibility.pdf'))
fig = plt.figure()
ax = fig.add_subplot(111)
bp = ax.boxplot(dat, notch=True, patch_artist=True)
for patch in bp['boxes']:
patch.set(facecolor='tab:blue')
nm_rmn = riemannian_samples(True)
dat = [nm_rmn[100000][t]['absrev'] for t in thresholds]
dat = [_[~np.isnan(_)] for _ in dat]
nm_bp = ax.boxplot(dat, notch=True, patch_artist=True)
for patch in nm_bp['boxes']:
patch.set(facecolor='tab:red')
nb_rmn = riemannian_samples(True, True)
dat = [nb_rmn[100000][t]['absrev'] for t in thresholds]
dat = [_[~np.isnan(_)] for _ in dat]
nb_bp = ax.boxplot(dat, notch=True, patch_artist=True)
for patch in nb_bp['boxes']:
patch.set(facecolor='tab:green')
ax.grid(linestyle=':')
ax.xaxis.set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(1, num_thresholds + 1))
ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
ax.tick_params(axis='y', labelsize=24)
ax.set_xlim(0.25, len(thresholds) + 0.75)
ax.set_xlabel('$\log_{10}$ Threshold', fontsize=30)
ax.set_ylabel('$\log_{10}$ Abs. Rev. Err.', fontsize=30)
fig.tight_layout()
fig.savefig(os.path.join('images', 'absolute-reversibility-vs-newton.pdf'))
dat = [rmn[100000][t]['relrev'] for t in thresholds]
dat = [_[~np.isnan(_)] for _ in dat]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.boxplot(dat, notch=True)
ax.grid(linestyle=':')
ax.xaxis.set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(1, num_thresholds + 1))
ax.set_xticklabels(['{:.0f}'.format(np.log10(t)) for t in thresholds], fontsize=24)
ax.tick_params(axis='y', labelsize=24)
ax.set_xlim(0.25, len(thresholds) + 0.75)
ax.set_xlabel('$\log_{10}$ Threshold', fontsize=30)
ax.set_ylabel('$\log_{10}$ Rel. Rev. Error', fontsize=30)
fig.tight_layout()
fig.savefig(os.path.join('images', 'relative-reversibility.pdf'))
def momentum_fixed_point():
euclid = euclidean_samples()
rmn = riemannian_samples()
num_thresholds = 9
thresholds = np.logspace(-num_thresholds, -1, num_thresholds)
dat = [np.log10(rmn[100000][t]['nfp_mom']) for t in thresholds]
dat = [_[~np.isnan(_)] for _ in dat]
dat = [_[np.random.permutation(len(_))[:10000]] for _ in dat]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.boxplot(dat, notch=True)
ax.grid(linestyle=':')
ax.xaxis.set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(1, num_thresholds + 1))
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_Scripting.ipynb (unless otherwise specified).
__all__ = ['qshell', 'get_fpgas', 'get_insts', 'begin_mem', 'end_mem', 'read', 'format_mem', 'write', 'read_write',
'read_write_all', 'program_proj', 'compile_proj', 'archive_proj', 'back_annotate', 'write_qsf', 'lut_mask',
'analyze_timing']
# Cell
import warnings
with warnings.catch_warnings(): #ignore warnings
warnings.simplefilter("ignore")
import sidis
import mif
import quartustcl
import numpy as np
import os
from typing import Optional, Tuple, Dict, Callable, Union
import functools
from functools import wraps
# Cell
def qshell(func):
'''Function decorator that opens quartustcl shell if none open.'''
@wraps(func)
def wrap(q=None,*args,**kwargs):
if q is None:
with quartustcl.QuartusTcl() as q:
result = func(q,*args,**kwargs)
else:
result = func(q,*args,**kwargs)
return result
return wrap
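# Usage sketch: a function decorated with @qshell can either reuse an existing
# quartustcl shell or, when called with q=None, open a temporary one just for
# that call, e.g.
#
#   devices = get_fpgas()                    # opens and closes its own shell
#   with quartustcl.QuartusTcl() as q:
#       devices = get_fpgas(q)               # reuses the caller's shell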
# Cell
@qshell
def get_fpgas(q : Optional[quartustcl.QuartusTcl] = None,
parse : bool = False):
'''
Get all connected FPGAs and SOCs as lists of hardware names `hwnames` and device names `devnames`.
Uses `q` shell if given, otherwise opens new shell. If `parse`, extracts only the FPGA, ignoring SoCs.
Returns a dict keyed by hardware port (e.g USB) and valued with devices (e.g FPGA/SOC).
'''
hwnames = q.parse(q.get_hardware_names())
devnames = [q.parse(q.get_device_names(hardware_name=h)) for h in hwnames]
if parse:
devnames=[d[-1] for d in devnames] #SoC is always first, if it exists
return dict(list(zip(hwnames,devnames)))
# Cell
@qshell
def get_insts(q = None,
N_levels=2,
hwnames=None,
devnames=None):
'''
Get all memory instances from `hwname` and `devname`. See `QuartusTcl` doc for `N_levels`.
'''
if hwnames is None or devnames is None:
device_dict=get_fpgas(parse=True)
hwnames=list(device_dict.keys())
devnames=list(device_dict.values())
memories={}
for hwname,devname in zip(hwnames,devnames):
memories_raw = q.get_editable_mem_instances(hardware_name=hwname,\
device_name=devname)
memories[hwname]=q.parse(memories_raw, levels=N_levels)
return memories
# Cell
def begin_mem(q = None,
hw='DE-SoC [USB-1]',
dev='@2: 5CSEBA6(.|ES)/5CSEMA6/.. (0x02D020DD)'):
'''
Open a memory edit. If one is already open, pass.
'''
try:
q.begin_memory_edit(hardware_name=hw, device_name=dev)
except:
pass
def end_mem(q = None):
'''
End a memory edit. If there are none open, pass.
'''
try:
q.end_memory_edit()
except:
pass
# Cell
@qshell
def read(q=None,
inst=0,
hw='DE-SoC [USB-1]',
dev='@2: 5CSEBA6(.|ES)/5CSEMA6/.. (0x02D020DD)',
begin=True,
end=True,
fname=None,
delete_mif=True):
'''Reads memory from an instance `inst` into an array `data`.
Option `delete_mif` will delete temporary .mif file if set to `True`.'''
if begin:
begin_mem(q,hw=hw,dev=dev)
fname=fname or 'read_inst{0}'.format(inst)
q.save_content_from_memory_to_file(
instance_index=inst,
mem_file_path=fname,
mem_file_type='mif')
with open(fname, 'r') as f:
data = mif.load(f)
f.close()
if delete_mif:
os.remove(fname)
if end:
end_mem(q)
return np.array(data).astype(float).astype(int)
# Cell
def format_mem(data,bits=None):
'''
Format input data to nested array required for memory
'''
if (type(data) is int) or (type(data) is float):
data=sidis.num2ar(data,bits=bits)
data=np.array(data).astype(int)
if len(data.shape)==1: #if not matrix, convert for tcl
data=np.expand_dims(data,axis=0)
return data
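# Example (sketch): scalars are expanded to a bit vector and wrapped into the 2-D
# layout expected by the .mif writer; the exact bit ordering is whatever
# sidis.num2ar produces, which is assumed rather than documented here:
#
#   format_mem(5, bits=4)     # -> integer array of shape (1, 4)
#   format_mem([1, 0, 1])     # -> array([[1, 0, 1]])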
# Cell
@qshell
def write(q=None,
inst=0,
data=1,
hw='DE-SoC [USB-1]',
dev='@2: 5CSEBA6(.|ES)/5CSEMA6/.. (0x02D020DD)',
bits=None,
begin=True,
end=True,
fname=None,
delete_mif=True):
'''Writes `data` array to memory instance `inst`.
Option `delete_mif` will delete temporary .mif file if set to `True`.'''
data = format_mem(data=data,bits=bits)
fname=fname or 'write_inst{0}'.format(inst)
if begin:
begin_mem(q,hw,dev)
with open(fname, 'w') as f:
mif.dump(data, f)
f.close()
q.update_content_to_memory_from_file(
instance_index=inst,
mem_file_path=fname,
mem_file_type='mif')
if end:
end_mem(q)
if delete_mif:
os.remove(fname)
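# Usage sketch (assumes the default DE-SoC board on USB-1 and an editable
# in-system memory instance 0 in the programmed design):
#
#   write(inst=0, data=5, bits=8)     # write the value 5 as an 8-bit word
#   value = read(inst=0)              # read it back as an integer array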
# Cell
@qshell
def read_write(q=None,
args=[[0,'w',1,1],[0,'r']],
hw='DE-SoC [USB-1]',
dev='@2: 5CSEBA6(.|ES)/5CSEMA6/.. (0x02D020DD)',
reps=1,
begin=True,
end=True):
'''
Read/write using `args`, a list of lists/tuples each of the form
(instance index, 'r' or 'w', data to write if 'w', bits to use if 'w').
Can repeat operation up to `reps`.
'''
data=[[] for i in range(reps)]
if begin:
begin_mem(q=q,hw=hw,dev=dev)
for i in range(reps):
for tup in args:
inst=tup[0]
if tup[1]=='r':
data[i]+=[read(q=q,inst=inst,hw=hw,dev=dev,begin=False,end=False)]
elif tup[1]=='w':
x=tup[2]
if len(tup)==3:
b=None
else:
b=tup[3]
x=format_mem(data=x,bits=b)
write(q=q,inst=inst,data=x,hw=hw,dev=dev,begin=False,end=False)
if end:
end_mem(q)
if data!=[]:
data=np.squeeze(data).astype(int)
if np.shape(data)==():
            data = np.array([data])
import numpy as np
try:
import cairo
except:
import cairocffi as cairo
from enum import Enum
import copy
from animlib.utils.points import convertToPoints, sliceBezier
from animlib.utils.colors import convertToColor, ColorComponent
class Center(Enum):
BY_POINTS = 0
BY_OUTLINE = 1
class Direction(Enum):
BELOW = (0, 1)
ABOVE = (0, -1)
LEFT = (-1, 0)
RIGHT = (1, 0)
POSITION = (0, 0)
def getPoint(self):
return convertToPoints(self.value)
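# Example (sketch): Direction members double as unit offsets in point form, e.g.
#
#   Direction.BELOW.getPoint()   # wraps (0, 1) via convertToPoints
#
# which downstream code can scale to place one geometry relative to another.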
class Base():
"""
Defines the basic geometry using points, stroke and fill information
"""
def __init__(self, *args, **kwargs):
assert len(args)==0, "for Base objects, each argument must be named"
self.clearPoints()
self._paths = []
self._fillGradient = None
self._fillColor = np.array((1.0, 1.0, 1.0, 1.0))
self._strokeGradient = None
self._strokeColor = np.array((1.0, 1.0, 1.0, 1.0))
self._strokeWidth = 0.10
self._isHidden = False
for key in kwargs:
arg = kwargs[key]
if key in ["points", "pts"]:
self.addPoint(convertToPoints(arg))
elif key in ["fillColor", "fill_color"]:
self.setFill(color=arg)
elif key in ["strokeColor", "stroke_color"]:
self.setStroke(color=arg)
elif key in ["strokeWidth", "stroke_width"]:
self.setStroke(width=arg)
elif key in ["hidden", "isHidden", "is_hidden"]:
self._isHidden = arg
def __repr__(self):
return "{}[{}]: 0x{:x}".format(
self.__class__.__name__,
"H" if self._isHidden else "V",
id(self))
def copy(self):
""" Returns a copy (i.e. deep copy) of the object """
return copy.deepcopy(self)
def hide(self, isHidden=True):
""" Hides the geometry from being drawn """
self._isHidden = isHidden if isinstance(isHidden, bool) else True
def show(self, isShown=True):
""" Shows the geometry when being drawn """
self._isHidden = not isShown if isinstance(isShown, bool) else False
def addPoint(self, point):
""" Adds one ore more points """
if isinstance(point, (list, tuple)):
[self.addPoint(p) for p in point]
elif isinstance(point, str):
point = convertToPoints(point)
elif not isinstance(point, np.ndarray) and np.size(point, 1) != 2:
return
assert len(self._paths) > 0, "Object has no path"
self._paths[-1] = np.concatenate((self._paths[-1], point), 0)
def duplicatePath(self, idx=None):
if not isinstance(idx, int):
idx = np.random.randint(len(self._paths))
self._paths.insert(idx, np.array(self._paths[idx]))
def pathsMatch(self, target):
if not isinstance(target, Base):
raise Exception("can only compare a target of type Base")
return self.getNumPaths() == target.getNumPaths()
def pointsOfPathsMatch(self, target):
if not self.pathsMatch(target):
raise Exception("number of paths of target must match Base")
r = [] # result
for p1, p2 in zip(self._paths, target._paths):
            r += [np.size(p1, 0) == np.size(p2, 0)]
import os
import os.path
import json
import cv2
import numpy as np
from modules.draw import Plotter3d, draw_poses
from modules.parse_poses import parse_poses
from modules.myconst import *
import logging
# define RendererContainer class to hold all renderer parameters
class RendererContainer:
def __init__(self, canvas_3d = [], canvas_3d_window_name = [], plotter = [], frame_window_name = [], edges = []):
self.canvas_3d = canvas_3d
self.canvas_3d_window_name = canvas_3d_window_name
self.plotter = plotter
self.frame_window_name = frame_window_name
self.edges = edges
# define PosesContainer class to hold all output args from get_poses
class PosesContainer:
def __init__(self, poses_2d = [], poses_3d = [], poses_id = [], poses_id_last = [], pid = [], poses_2d_raw = []):
self.poses_2d = poses_2d
self.poses_3d = poses_3d
self.poses_id = poses_id
self.poses_id_last = poses_id_last
self.pid = pid
self.poses_2d_raw = poses_2d_raw
def setup_renderer(frame_width, frame_height, canvas_3d_window_name, frame_window_name):
logging.debug(f"@{__name__}: set up renderer windows with frame [width{frame_width}, height{frame_height}]")
#setup the canvas_3d window to show skeletons
canvas_3d = np.zeros((frame_height, frame_width, 3), dtype=np.uint8)
plotter = Plotter3d(canvas_3d.shape[:2], origin=(0.5, 0.5), scale=min(canvas_3d.shape[:2])/CANVAS3D_AXIS_LEN)
cv2.namedWindow(canvas_3d_window_name)
cv2.setMouseCallback(canvas_3d_window_name, Plotter3d.mouse_callback)
cv2.namedWindow(frame_window_name)
return RendererContainer(canvas_3d, canvas_3d_window_name, plotter, frame_window_name)
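# Usage sketch (the window names below are illustrative; the real ones likely come
# from modules.myconst):
#
#   renderer = setup_renderer(frame_width=1280, frame_height=720,
#                             canvas_3d_window_name='Canvas 3D',
#                             frame_window_name='Poses')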
def render_poses(frame, posedata, renderer_param, video_param):
#confirm poses has value
if len(posedata.poses_2d) != len(posedata.poses_3d):
raise ValueError('poses_2d and poses_3d need to have same length!')
# setup for text drawing on the frame
font_face = cv2.FONT_HERSHEY_COMPLEX
font_color = (0, 0, 255)
font_thickness = 1
num_poses = len(posedata.poses_2d)
if num_poses > 0:
#update skeleton plot
renderer_param.edges = (Plotter3d.SKELETON_EDGES +
NUMKPTS_PANOPTIC * np.arange(posedata.poses_3d.shape[0]).reshape((-1, 1, 1))).reshape((-1, 2))
renderer_param.plotter.plot(renderer_param.canvas_3d, posedata.poses_3d, renderer_param.edges)
cv2.imshow(renderer_param.canvas_3d_window_name, renderer_param.canvas_3d)
#update pose plot and pose_id
if (video_param.use_unitycam):
(lt_x, lt_y, rb_x, rb_y), best_scale, baseline = findFontLocate('cam: 100 fps', font_face, font_thickness, frame)
draw_poses(frame, posedata.poses_2d)
for pose_idx in range(num_poses):
cv2.putText(frame, 'id:{0:3d}'.format(int(posedata.pid[pose_idx])), \
(max(int(posedata.poses_2d[pose_idx][3]), 20), max(20, int(2*posedata.poses_2d[pose_idx][4]-posedata.poses_2d[pose_idx][1]))), \
font_face, best_scale, (255, 255, 0), font_thickness, cv2.LINE_8)
#update frame rates and pose view
dy = int(rb_y - lt_y + baseline)
#cv2.putText(frame, 'cam: {0:d} fps'.format(int(fps.video)), (lt_x, rb_y), font_face, best_scale, font_color, font_thickness, cv2.LINE_8)
#cv2.putText(frame, 'pose: {0:d} fps'.format(int(fps.openpose)), (lt_x, rb_y+dy), font_face, best_scale, font_color, font_thickness, cv2.LINE_8)
#cv2.putText(frame, 'proc: {0:d} fps'.format(int(fps.processing)), (lt_x, rb_y+2*dy), font_face, best_scale, font_color, font_thickness, cv2.LINE_8)
cv2.imshow(renderer_param.frame_window_name, frame)
else:
scale_w = POSE_FRAME_DISP_SIZE/frame.shape[1]
scale_h = POSE_FRAME_DISP_SIZE/frame.shape[0]
scaled_img = cv2.resize(frame, dsize=None, fx=scale_w, fy=scale_h)
(lt_x, lt_y, rb_x, rb_y), best_scale, baseline = findFontLocate('cam: 100 fps', font_face, font_thickness, scaled_img)
poses_2d_disp = []
for pose_2d in posedata.poses_2d_raw:
num_kpt = (posedata.poses_2d_raw.shape[1] - 1) // 3
                pose_2d_disp = np.ones(pose_2d.shape[0], dtype=np.float32)
import numpy as np
import skimage.restoration as skr
import scipy.ndimage as scnd
import matplotlib as mpl
import matplotlib.pyplot as plt
import stemtool as st
import matplotlib.offsetbox as mploff
import matplotlib.gridspec as mpgs
import matplotlib_scalebar.scalebar as mpss
import numba
def phase_diff(angle_image):
"""
Differentiate a complex phase image while
ensuring that phase wrapping doesn't
distort the differentiation.
Parameters
----------
angle_image: ndarray
Wrapped phase image
Returns
-------
diff_x: ndarray
X differential of the phase image
diff_y: ndarray
Y differential of the phase image
Notes
-----
The basic idea of this is that we differentiate the
complex exponential of the phase image, and then obtain the
differentiation result by multiplying the differential with
the conjugate of the complex phase image.
Reference
---------
.. [1] <NAME>., <NAME>, and <NAME>. "Quantitative measurement
of displacement and strain fields from HREM micrographs."
Ultramicroscopy 74.3 (1998): 131-146.
"""
imaginary_image = np.exp(1j * angle_image)
diff_imaginary_x = np.zeros(imaginary_image.shape, dtype="complex_")
diff_imaginary_x[:, 0:-1] = np.diff(imaginary_image, axis=1)
diff_imaginary_y = np.zeros(imaginary_image.shape, dtype="complex_")
    diff_imaginary_y[0:-1, :] = np.diff(imaginary_image, axis=0)
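    # Final step from the Notes above (a sketch, assuming the standard
    # geometric-phase formulation): multiplying each differential by the
    # conjugate of the complex phase image and keeping the imaginary part
    # recovers the real-valued phase derivatives.
    diff_x = np.imag(diff_imaginary_x * np.conj(imaginary_image))
    diff_y = np.imag(diff_imaginary_y * np.conj(imaginary_image))
    return diff_x, diff_y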
# coding=utf-8
"""Fast Sample Reweighting."""
import gc
import os
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from tqdm import tqdm
from ieg import utils
from ieg.models import networks
from ieg.models.custom_ops import logit_norm
from ieg.models.custom_ops import MixMode
from ieg.models.l2rmodel import L2R
FLAGS = flags.FLAGS
logging = tf.logging
def reduce_mean(vectors):
"""Reduces mean without nan."""
return tf.where(
tf.size(vectors) > 0, tf.reduce_mean(vectors),
tf.zeros((), dtype=vectors.dtype))
def softmax(q, axis=-1):
exps = np.exp(q - np.max(q, axis=-1, keepdims=True))
return exps / np.sum(exps, axis=axis, keepdims=True)
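# Example (sketch): subtracting the row-wise max keeps the exponentials finite
# even for large logits, e.g. softmax(np.array([[1000., 1000.]])) evaluates to
# [[0.5, 0.5]] instead of overflowing.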
class Queue(object):
"""Queue."""
def __init__(
self,
sess,
dataset_ds,
dataset_size,
nclass,
shape=(32, 32, 3),
capacity=1000,
batch_size=200,
beta=0.0, # use [0, 1] to smooth past history information
metric='loss'): # single gpu default
self.init_capacity = capacity
self.capacity = self.init_capacity
self.sess = sess
self.nclass = nclass
self.dataset_size = dataset_size
self.metric = metric
self.batch_size = batch_size
self.beta = beta
self.summaries = []
self.shape = shape
assert capacity >= batch_size
assert capacity >= nclass, 'Class number larger than capacity'
with tf.device('/cpu:0'):
with tf.variable_scope('queue'):
self.queue_probe_images = tf.Variable(
tf.zeros(shape=[batch_size] + list(shape), dtype=tf.float32),
trainable=False,
name='probe_images',
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
self.queue_probe_labels = tf.Variable(
tf.zeros(shape=[
batch_size,
], dtype=tf.int32),
trainable=False,
name='probe_labels',
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
if FLAGS.use_pseudo_loss:
self.queue_ema_logits = tf.Variable(
tf.zeros(shape=[dataset_size, nclass], dtype=tf.float32),
trainable=False,
name='ema_logits',
aggregation=tf.VariableAggregation.MEAN)
self.queue_data_indices = tf.Variable(
tf.zeros((self.init_capacity,), dtype=tf.float32),
trainable=False,
name='indices',
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
# For monitors
self.purity_log = tf.Variable(
tf.zeros((), dtype=tf.float32),
trainable=False,
name='purity',
aggregation=tf.VariableAggregation.MEAN)
self.capacity_log = tf.Variable(
tf.zeros((), dtype=tf.float32),
trainable=False,
name='capacity',
aggregation=tf.VariableAggregation.MEAN)
self.summaries.append(
tf.summary.histogram('queue/indices', self.queue_data_indices))
self.summaries.append(tf.summary.scalar('queue/purity', self.purity_log))
self.summaries.append(
tf.summary.scalar('queue/capacity', self.capacity_log))
self.summaries.append(
tf.summary.histogram('queue/labels', self.queue_probe_labels))
self.plh_probe_images = tf.placeholder(
tf.float32, shape=[batch_size] + list(shape))
self.plh_probe_labels = tf.placeholder(
tf.int32, shape=[
batch_size,
])
# There are three global numpy variables to maintain.
# ds_pre_scores is used for forget_event metric.
self.ds_pre_scores = np.zeros((dataset_size, 1), np.float32).squeeze()
self.ds_rates = np.zeros((dataset_size, 1), np.float32).squeeze()
# initialize as -1 for sanity check
self.ds_labels = np.zeros((dataset_size,), np.int32) - 1
self.dataset_ds = dataset_ds.batch(batch_size).prefetch(
buffer_size=tf.data.experimental.AUTOTUNE).apply(
tf.data.experimental.ignore_errors())
self.queue_all_images = np.zeros([capacity] + list(shape), np.float32)
# Init to be -1.
self.queue_all_labels = np.zeros([
capacity,
], np.float32) - 1
self.probe_update_ops = []
self.probe_update_ops.append(
tf.assign(self.queue_probe_images, self.plh_probe_images))
self.probe_update_ops.append(
tf.assign(self.queue_probe_labels, self.plh_probe_labels))
self.ids = []
    np.random.seed(FLAGS.seed)
# -*-coding:Utf-8 -*
# ====================================================================
# Packages
# ====================================================================
import configparser as cp
import copy
import glob
import muLAn
import muLAn.packages.general_tools as gtools
import muLAn.packages.algebra as algebra
import numpy as np
import os
import pandas as pd
import sys
import tables
class FitResults:
"""Class to read, save, and manipulate models tested during the fit.
Args:
parser (:obj:`configparser.ConfigParser`): options and configurations
for muLAn.
run_id (str): Name of a muLAn archive (i.e., the name of a run).
Default `None`.
format (str): {`ascii` | `h5`}, default `ascii`. File format to load
the MCMC results.
Attributes:
samples (`pandas.DataFrame`): table of all the samples explored by the
MCMC.
"""
def __init__(self, parser, format='ascii', **kwargs):
self.parser = parser
# Load
if format=='ascii':
self.load_aimc_from_file(parser, **kwargs)
elif format=='h5':
self.load()
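    # Usage sketch (the run_id is illustrative and the chain files are assumed to
    # exist under the configured archive paths): load the ASCII chains of a
    # finished run and inspect the best model found so far.
    #
    #   fit = FitResults(cfgsetup, format='ascii', run_id='my-run')
    #   best = fit.samples.loc[fit.samples['chi2'].idxmin()]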
def load_aimc_from_file(self, cfgsetup, **kwargs):
"""Method to load model parameters from files created during MCMC.
This method loads ASCII files created by the package EMCEE, after the
        end of an MCMC run. The sampler is assumed to be an AIMC.
Args:
parser (:obj:`configparser.ConfigParser`): options and configurations
for muLAn.
run_id (str): Name of a muLAn archive (i.e., the name of a run).
Default `None`.
"""
# Identify filenames from MCMC
path = cfgsetup.get('FullPaths', 'Event') + cfgsetup.get('RelativePaths', 'Chains')
if 'run_id' in kwargs:
fnames_chains = glob.glob(path + kwargs['run_id'] + "*-c*.txt")
fnames_chains_exclude = glob.glob(path + kwargs['run_id'] + "*g*.txt")
else:
fnames_chains = glob.glob(path + cfgsetup.get('Controls', 'Archive') + "*-c*.txt")
fnames_chains_exclude = glob.glob(path + cfgsetup.get('Controls', 'Archive') + "*g*.txt")
temp =[]
for a in fnames_chains:
if (a in fnames_chains_exclude)==False:
temp.append(a)
fnames_chains = copy.deepcopy(temp)
del temp, fnames_chains_exclude
nb_chains = len(fnames_chains)
if nb_chains!=0:
samples_file = dict(
{'chi2': [], 't0': [], 'u0': [], 'tE': [], 'rho': [], \
'gamma': [], 'piEE': [], 'piEN': [], 's': [], 'q': [], \
'alpha': [], 'dalpha': [], 'ds': [], 'chain': [], 'fullid': [],\
'date_save': [], 'time_save': [], 'id': [], 'accrate': [],\
'chi2/dof': []})
# Read on the chains
if nb_chains > 0:
for i in range(nb_chains):
file = open(fnames_chains[i], 'r')
for line in file:
params_model = line
if params_model[0] == '#':
continue
try:
samples_file['id'].append(int(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][0]))
samples_file['t0'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][1]))
samples_file['u0'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][2]))
samples_file['tE'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][3]))
samples_file['rho'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][4]))
samples_file['gamma'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][5]))
samples_file['piEN'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][6]))
samples_file['piEE'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][7]))
samples_file['s'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][8]))
samples_file['q'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][9]))
samples_file['alpha'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][10]))
samples_file['dalpha'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][11]))
samples_file['ds'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][12]))
samples_file['chi2'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][13]))
samples_file['accrate'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][14]))
samples_file['date_save'].append(int(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][15]))
samples_file['time_save'].append(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][16])
samples_file['chi2/dof'].append(float(
[a for a in (params_model.split('\n')[0].split(' ')) if
(a != '')][17]))
samples_file['chain'].append(int(fnames_chains[i][-8:-4]))
samples_file['fullid'].append(-1)
except:
text = "\n\033[1m\033[91mThe file\033[0m\n" + "\033[1m\033[91m" + fnames_chains[i]\
+ "\033[0m\n\033[1m\033[91mis corrupted. muLAn killed.\033[0m"
sys.exit(text)
file.close()
# Create a pandas.DataFrame to store the runs
samples = pd.DataFrame(samples_file)
samples['dchi2'] = samples['chi2'] - | np.min(samples['chi2']) | numpy.min |
# THIS LIBRARY CONTAINS THE ALGORITHMS EXPLAINED IN THE WORK
# "Acceleration of Descent-based Optimization Algorithms via Caratheodory's Theorem"
####################################################################################
#
# This library is focused only on the development of the Caratheodory accelerated
# algorithms in the case of least-square with and without Lasso regularization.
#
# In general X represents the data/features, Y the labels and theta_0 the initial
# parameters. It returns theta (the desired argmin) and other variables to reconstruct
# the history of the algorithm.
#
# We can split the functions into three groups:
# - ADAM, SAG
# - BCD algorithm with the Caratheodory Sampling Procedure(CSP).
# The structure of the accelerated functions is this:
# a) the *_CA_* functions are the outer while of the algorithms described in
# the cited work
# b) the *_mod_* functions represent the inner while, i.e. where we use
# the reduced measure
# c) directions_CA_steps_* functions are necessary for the parallelization
# of the code
# - BCD w/out the Caratheodory Sampling Procedure.
# The structure of the accelerated functions is this:
# a) mom_BCD_GS_ls, mom_BCD_random_ls, BCD_GS_ls are the outer while of
# the algorithms described in the cited work w/out the CSP
# b) parallel_BCD_mom, parallel_BCD are necessary for the parallelization
# of the code
#
####################################################################################
import os
import numpy as np
import copy, timeit, psutil
import recombination as rb
from numba import njit, prange
import multiprocessing as mp
###############################################
# ADAM
###############################################
def ADAM_ls(X,Y,theta_0,lambda_LASSO=0.,batch_size=256,
lr=1e-3,loss_accepted=1e-8,max_iter=1e2):
# it runs the ADAM algorithm specialized in the case of least-square
# with a LASSO regularization term.
# Copied from the original paper
tic = timeit.default_timer()
N = np.shape(X)[0]
iteration = 0.
loss = loss_accepted+1.
theta = np.array(theta_0)
# Adam Parameter
beta_1 = 0.9
beta_2 = 0.999
eps = 1e-8
t = 0
m = np.zeros(np.size(theta_0))
v = np.zeros(np.size(theta_0))
m_hat = np.zeros(np.size(theta_0))
v_hat = np.zeros(np.size(theta_0))
loss_story = []
time_story = []
iteration_story = []
n_cycles = int(N/batch_size)
while iteration<=max_iter:
error_persample = np.dot(X,theta)-Y
error_persample = error_persample[np.newaxis].T
loss = np.dot(error_persample.T,error_persample)[0,0]/N
loss += lambda_LASSO * np.abs(theta).sum()
loss_story.append(loss.item())
toc = timeit.default_timer()-tic
time_story.append(toc)
iteration_story.append(iteration)
print("iteration = ", int(iteration+0.5), " | loss = ", loss,
" | time = ",timeit.default_timer()-tic)
idx_shuffled = np.random.choice(N,N, replace=False)
for i in np.arange(n_cycles):
t += 1
idx = idx_shuffled[i*batch_size:i*batch_size+batch_size]
error_persample = np.dot(X[idx],theta)-Y[idx]
error_persample = error_persample[np.newaxis].T
gr = 2*np.matmul(X[idx].T,error_persample)/N
gr += lambda_LASSO * np.sign(theta).reshape(-1,1)
m = beta_1*m + (1-beta_1)*gr[:,0]
v = beta_2*v + (1-beta_2)*np.power(gr[:,0],2)
m_hat = m/(1-beta_1**t)
v_hat = v/(1-beta_2**t)
theta -= lr*m_hat/(np.sqrt(v_hat)+eps)
iteration += 1
error_persample = np.dot(X,theta)-Y
error_persample = error_persample[np.newaxis].T
loss = np.dot(error_persample.T,error_persample)[0,0]/N
loss += lambda_LASSO * np.abs(theta).sum()
loss_story.append(loss.item())
toc = timeit.default_timer()-tic
time_story.append(toc)
iteration_story.append(iteration)
print("iteration = ", int(iteration+0.5), " | loss = ", loss,
" | time = ",timeit.default_timer()-tic)
return (loss_story,iteration_story,theta,time_story)
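# Hedged usage sketch (not part of the original library): the synthetic data
# below is made up and only illustrates the expected call signature of ADAM_ls
# on a small least-squares problem with a LASSO penalty.
#
#   rng = np.random.RandomState(0)
#   X_demo = rng.randn(1024, 10)
#   Y_demo = X_demo.dot(rng.randn(10)) + 0.01 * rng.randn(1024)
#   theta_init = np.zeros(10)
#   loss_hist, iter_hist, theta_hat, time_hist = ADAM_ls(
#       X_demo, Y_demo, theta_init, lambda_LASSO=1e-3,
#       batch_size=128, lr=1e-2, max_iter=20)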
###############################################
# SAG
# Observation: the learning rate must be small
# or a more clever strategy is needed
###############################################
def SAG_ls(X,Y,theta_0,lambda_LASSO=0.,batch_size=256,
lr=1e-3,loss_accepted=1e-8,max_iter=1e2):
# it runs the SAG algorithm specialized in the case of least-square
# with a LASSO regularization term.
# Copied from the original paper
tic = timeit.default_timer()
N, n = np.shape(X)
iteration = 0.
loss = loss_accepted+1.
theta = np.array(theta_0)
loss_story = []
time_story = []
iteration_story = []
n_cycles = int(N/batch_size)
gr_persample = np.zeros((N,n))
while iteration<=max_iter:
error_persample = np.dot(X,theta)-Y
error_persample = error_persample[np.newaxis].T
loss = np.dot(error_persample.T,error_persample)[0,0]/N
loss += lambda_LASSO * np.abs(theta).sum()
loss_story.append(loss.item())
toc = timeit.default_timer()-tic
time_story.append(toc)
iteration_story.append(iteration)
print("iteration = ", int(iteration+0.5), " | loss = ", loss,
" | time = ",timeit.default_timer()-tic)
idx_shuffled = np.random.choice(N,N, replace=False)
if iteration == 0:
sum_total = 0.
for i in range(n_cycles):
idx = idx_shuffled[i*batch_size:(i+1)*batch_size]
error_persample = np.dot(X[idx],theta)-Y[idx]
error_persample = error_persample[np.newaxis].T
gr_persample[idx,:] = 2*np.multiply(X[idx,:],error_persample)
gr_persample[idx,:] += lambda_LASSO * np.sign(theta)
sum_new_idx = np.sum(gr_persample[idx,:],0)
sum_total += sum_new_idx
theta -= lr * sum_total/((i+1)*batch_size)
else:
for i in range(n_cycles):
idx = idx_shuffled[i*batch_size:i*batch_size+batch_size]
sum_old_idx = np.sum(gr_persample[idx,:],0)
error_persample = np.dot(X[idx],theta)-Y[idx]
error_persample = error_persample[np.newaxis].T
gr_persample[idx,:] = 2*np.multiply(X[idx,:],error_persample)
gr_persample[idx,:] += lambda_LASSO * np.sign(theta)
sum_new_idx = np.sum(gr_persample[idx,:],0)
sum_total = sum_total - sum_old_idx + sum_new_idx
theta -= lr * sum_total/N
iteration += 1
error_persample = | np.dot(X,theta) | numpy.dot |
from ipme.classes.cell.utils.cell_clear_selection import CellClearSelection
from ipme.utils.constants import COLORS, BORDER_COLORS, PLOT_HEIGHT, PLOT_WIDTH, SIZING_MODE, RUG_DIST_RATIO
from ipme.utils.stats import pmf
from ipme.utils.functions import find_indices
from bokeh.models import BoxSelectTool, HoverTool
from bokeh.models import ColumnDataSource
from bokeh import events
from bokeh.plotting import figure
import numpy as np
import threading
from functools import partial
class CellDiscreteHandler:
def __init__(self):
pass
@staticmethod
def initialize_glyphs_interactive(variableCell, space):
so_seg = variableCell.plot[space].segment(x0 = 'x', y0 ='y0', x1 = 'x', y1 = 'y', source = variableCell.source[space], line_alpha = 1.0, color = COLORS[0], line_width = 1, selection_color = COLORS[0], \
nonselection_color = COLORS[0], nonselection_line_alpha = 1.0)
variableCell.plot[space].scatter('x', 'y', source = variableCell.source[space], size = 4, fill_color = COLORS[0], fill_alpha = 1.0, line_color = COLORS[0], selection_fill_color = COLORS[0], \
nonselection_fill_color = COLORS[0], nonselection_fill_alpha = 1.0, nonselection_line_color = COLORS[0])
variableCell.plot[space].segment(x0 = 'x', y0 ='y0', x1 = 'x', y1 = 'y', source = variableCell.selection[space], line_alpha = 0.7, color = COLORS[2], line_width = 1)
variableCell.plot[space].scatter('x', 'y', source = variableCell.selection[space], size = 4, fill_color = COLORS[2], fill_alpha = 0.7, line_color = COLORS[2])
rec = variableCell.plot[space].segment(x0 = 'x', y0 ='y0', x1 = 'x', y1 = 'y', source = variableCell.reconstructed[space], line_alpha = 0.5, color = COLORS[1], line_width = 1)
variableCell.plot[space].scatter('x', 'y', source = variableCell.reconstructed[space], size = 4, fill_color = COLORS[1], fill_alpha = 0.5, line_color = COLORS[1])
##Add BoxSelectTool
variableCell.plot[space].add_tools(BoxSelectTool(dimensions = 'width', renderers = [so_seg]))
##Tooltips
TOOLTIPS = [("x", "@x"), ("y","@y"),]
hover = HoverTool( tooltips = TOOLTIPS, renderers = [so_seg, rec], mode = 'mouse')
variableCell.plot[space].tools.append(hover)
@staticmethod
def initialize_glyphs_static(variableCell, space):
so_seg = variableCell.plot[space].segment(x0 = 'x', y0 ='y0', x1 = 'x', y1 = 'y', source = variableCell.source[space], line_alpha = 1.0, color = COLORS[0], line_width = 1, selection_color = COLORS[0], \
nonselection_color = COLORS[0], nonselection_line_alpha = 1.0)
variableCell.plot[space].scatter('x', 'y', source = variableCell.source[space], size = 4, fill_color = COLORS[0], fill_alpha = 1.0, line_color = COLORS[0], selection_fill_color = COLORS[0], \
nonselection_fill_color = COLORS[0], nonselection_fill_alpha = 1.0, nonselection_line_color = COLORS[0])
##Tooltips
TOOLTIPS = [("x", "@x"), ("y","@y"),]
hover = HoverTool( tooltips = TOOLTIPS, renderers = [so_seg], mode = 'mouse')
variableCell.plot[space].tools.append(hover)
@staticmethod
def initialize_fig(variableCell, space):
variableCell.plot[space] = figure( x_range = variableCell.x_range[space], tools = "wheel_zoom,reset,box_zoom", toolbar_location = 'right',
plot_width = PLOT_WIDTH, plot_height = PLOT_HEIGHT, sizing_mode = SIZING_MODE)
variableCell.plot[space].border_fill_color = BORDER_COLORS[0]
variableCell.plot[space].xaxis.axis_label = ""
variableCell.plot[space].yaxis.visible = False
variableCell.plot[space].toolbar.logo = None
variableCell.plot[space].xaxis[0].ticker.desired_num_ticks = 3
@staticmethod
def initialize_fig_interactive(variableCell, space):
CellDiscreteHandler.initialize_fig(variableCell, space)
##Events
variableCell.plot[space].on_event(events.Tap, partial(CellDiscreteHandler.clear_selection_callback, variableCell, space))
variableCell.plot[space].on_event(events.SelectionGeometry, partial(CellDiscreteHandler.selectionbox_callback, variableCell, space))
##on_change
variableCell.ic.sample_inds_update[space].on_change('data', partial(variableCell.sample_inds_callback, space))
@staticmethod
def initialize_fig_static(variableCell, space):
CellDiscreteHandler.initialize_fig(variableCell, space)
##on_change
variableCell.ic.sample_inds_update[space].on_change('data', partial(variableCell.sample_inds_callback, space))
@staticmethod
def initialize_cds(variableCell, space):
samples = variableCell.get_data_for_cur_idx_dims_values(space)
variableCell.source[space] = ColumnDataSource(data = pmf(samples))
variableCell.samples[space] = ColumnDataSource(data = dict(x = samples))
@staticmethod
def initialize_cds_interactive(variableCell, space):
CellDiscreteHandler.initialize_cds(variableCell, space)
variableCell.selection[space] = ColumnDataSource(data = dict(x = np.array([]), y = np.array([]), y0 = np.array([])))
variableCell.reconstructed[space] = ColumnDataSource(data = dict(x = np.array([]), y = np.array([]), y0 = | np.array([]) | numpy.array |
from mujoco_py import load_model_from_path, MjSim, MjViewer
import numpy as np
from numpy import matlib
from scipy import signal, stats
from sklearn.neural_network import MLPRegressor
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import matplotlib.lines as mlines
#import pickle
import os
from copy import deepcopy
from mujoco_py.generated import const
from all_functions import *
def calculate_closeloop_inputkinematics(step_number, real_attempt_positions, desired_kinematics, q_error_cum, P, I, delay_timesteps, gradient_edge_order=1, timestep=.005):
q_desired = desired_kinematics[step_number, np.ix_([0,3])][0]
q_dot_desired = desired_kinematics[step_number, np.ix_([1,4])][0]
q_error = q_desired - real_attempt_positions[step_number-1-delay_timesteps,:]
q_error_cum[step_number,:] = q_error
#import pdb; pdb.set_trace()
q_dot_in = q_dot_desired + np.array(P)*q_error + np.array(I)*(q_error_cum.sum(axis=0)*timestep)
q_double_dot_in = [
np.gradient(desired_kinematics[step_number-gradient_edge_order:step_number+1,1],edge_order=gradient_edge_order)[-1]/timestep,
np.gradient(desired_kinematics[step_number-gradient_edge_order:step_number+1,4],edge_order=gradient_edge_order)[-1]/timestep]
#desired_kinematics[step_number, np.ix_([2,5])][0]#
input_kinematics = [q_desired[0], q_dot_in[0], q_double_dot_in[0], q_desired[1], q_dot_in[1], q_double_dot_in[1]]
# There are multiple ways of calculating q_double_dot:
# 1- d(v_input(last_step)-v_desired(last_step))/dt
# 2- d(v_input(current)-v_observed(last_step))/dt
# 3- d(v_input(current)-v_input(last_step))/dt
    # 1 and 2 will have jumps and therefore can cause huge values when differentiated. 3 on the other hand will not make much physical sense
    # and will not help the system reach its goal since it disregards the plant and the goal velocity altogether.
    # We observed that using the acceleration values coming from the feedforward system (desired q_double_dot) works better than the alternatives;
    # however, the acceleration value can be set differently for other specific goals. For example, for velocity tracking (the main focus of this
    # project is position tracking), acceleration can be set corresponding to the velocity error to compensate for this error.
return input_kinematics, q_error_cum
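# Hedged worked example of the PI correction above (all numbers are made up):
# with q_desired = 0.50 rad, an observed position of 0.45 rad, P = 4, I = 1 and
# timestep = 0.005 s, a single step gives
#     q_error  = 0.50 - 0.45 = 0.05
#     q_dot_in = q_dot_desired + 4 * 0.05 + 1 * (0.05 * 0.005)
#              = q_dot_desired + 0.20025
# i.e. the commanded velocity is biased toward closing the position error.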
def closeloop_run_fcn(model, desired_kinematics, P, I, delay_timesteps=0, model_ver=0, plot_outputs=True, Mj_render=False, timestep=.005):
est_activations = estimate_activations_fcn(model, desired_kinematics)
number_of_task_samples = desired_kinematics.shape[0]
chassis_pos=np.zeros(number_of_task_samples,)
input_kinematics = np.zeros(desired_kinematics.shape)
real_attempt_positions = np.zeros([number_of_task_samples,2])
real_attempt_activations = np.zeros([number_of_task_samples,3])
q_error_cum = np.zeros([number_of_task_samples,2]) # sample error history
Mj_model = load_model_from_path("./models/nmi_leg_w_chassis_v{}.xml".format(model_ver))
sim = MjSim(Mj_model)
if Mj_render:
viewer = MjViewer(sim)
viewer.cam.fixedcamid += 1
viewer.cam.type = const.CAMERA_FIXED
sim_state = sim.get_state()
control_vector_length=sim.data.ctrl.__len__()
print("control_vector_length: "+str(control_vector_length))
sim.set_state(sim_state)
gradient_edge_order = 1
for ii in range(number_of_task_samples):
if ii < max(gradient_edge_order, delay_timesteps+1):
#print(ii)
input_kinematics[ii,:] = desired_kinematics[ii,:]
else:
[input_kinematics[ii,:], q_error_cum] = calculate_closeloop_inputkinematics(
step_number=ii,
real_attempt_positions=real_attempt_positions,
desired_kinematics=desired_kinematics,
q_error_cum=q_error_cum,
P=P,
I=I,
delay_timesteps=delay_timesteps,
gradient_edge_order=gradient_edge_order,
timestep=timestep)
est_activations[ii,:] = model.predict([input_kinematics[ii,:]])[0,:]
sim.data.ctrl[:] = est_activations[ii,:]
sim.step()
chassis_pos[ii]=sim.data.get_geom_xpos("Chassis_frame")[0]
current_positions_array = sim.data.qpos[-2:]
real_attempt_positions[ii,:] = current_positions_array
real_attempt_activations[ii,:] = sim.data.ctrl
if Mj_render:
viewer.render()
real_attempt_kinematics = positions_to_kinematics_fcn(
real_attempt_positions[:,0],
real_attempt_positions[:,1],
timestep=timestep)
error0 = error_cal_fcn(desired_kinematics[:,0], real_attempt_kinematics[:,0])
error1 = error_cal_fcn(desired_kinematics[:,3], real_attempt_kinematics[:,3])
average_error = 0.5*(error0+error1)
if plot_outputs:
#plt.figure()
alpha=.8
plot_t = np.linspace(timestep, desired_kinematics.shape[0]*timestep, desired_kinematics.shape[0])
plt.subplot(2, 1, 1)
plt.plot(plot_t, desired_kinematics[:,0], 'k', plot_t, real_attempt_kinematics[:,0], 'C1', alpha=.9)
plt.ylabel("$q_1$ (rads)")
plt.subplot(2, 1, 2)
plt.plot(plot_t, desired_kinematics[:,3], 'k', plot_t, real_attempt_kinematics[:,3], 'C1', alpha=.9)
plt.ylabel("$q_2$ (rads)")
plt.xlabel("time (s)")
plt.show(block=True)
return average_error, real_attempt_kinematics, real_attempt_activations
def openloop_run_fcn(model, desired_kinematics, model_ver=0, plot_outputs=False, Mj_render=False, timestep=.005):
est_activations = estimate_activations_fcn(model, desired_kinematics)
[real_attempt_kinematics, real_attempt_activations, chassis_pos] = run_activations_fcn(est_activations, model_ver=model_ver, timestep=0.005, Mj_render=Mj_render)
error0 = error_cal_fcn(desired_kinematics[:,0], real_attempt_kinematics[:,0])
error1 = error_cal_fcn(desired_kinematics[:,3], real_attempt_kinematics[:,3])
average_error = 0.5*(error0+error1)
if plot_outputs:
plt.figure(figsize=(10, 6))
plt.rcParams.update({'font.size': 18})
alpha=.8
plot_t = np.linspace(timestep, desired_kinematics.shape[0]*timestep, desired_kinematics.shape[0])
plt.subplot(2, 1, 1)
plt.plot(plot_t, desired_kinematics[:,0], 'tab:gray', plot_t, real_attempt_kinematics[:,0], 'C0', alpha=.9)
plt.ylabel("$q_1$ (rads)")
plt.ylim([-1.2, 1.2])
plt.subplot(2, 1, 2)
plt.plot(plot_t, desired_kinematics[:,3], 'tab:gray', plot_t, real_attempt_kinematics[:,3], 'C0', alpha=.9)
plt.ylabel("$q_2$ (rads)")
plt.xlabel("time (s)")
plt.ylim([-1.7, .2])
        # plt.show(block=True)
return average_error, real_attempt_kinematics, real_attempt_activations
def p2p_positions_gen_fcn(low, high, number_of_positions, duration_of_each_position, timestep):
sample_no_of_each_position = duration_of_each_position / timestep
random_array = np.zeros(int(np.round(number_of_positions*sample_no_of_each_position)),)
for ii in range(number_of_positions):
random_value = ((high-low)*(np.random.rand(1)[0])) + low
random_array_1position = np.repeat(random_value,sample_no_of_each_position)
random_array[int(ii*sample_no_of_each_position):int((ii+1)*sample_no_of_each_position)] = random_array_1position
return random_array
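# Hedged usage sketch for p2p_positions_gen_fcn (the numbers are illustrative):
# generate 4 random set-points between -1 and 1 rad, each held for 2 s at a
# 5 ms timestep, i.e. an array of 4 * 400 = 1600 samples.
#
#   q0_demo = p2p_positions_gen_fcn(low=-1, high=1, number_of_positions=4,
#                                   duration_of_each_position=2, timestep=.005)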
def plot_comparison_figures_fcn(errors_all, experiments_switch, trial_number):
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
plt.rcParams.update({'font.size': 18})
# plt 1: vs cycle period
if experiments_switch[0]:
plt.figure(figsize=(10, 6))
plt.plot(np.linspace(.5,10,trial_number), errors_all[0][0,:], color='C0', marker='.')
plt.plot(np.linspace(.5,10,trial_number), errors_all[0][1,:], color='C1', marker='.')
plt.ylim(0,.65)
ax = plt.gca()
xmin, xmax = ax.get_xbound()
mean_error_wo = mlines.Line2D([xmin,xmax], [errors_all[0][0,:].mean(),errors_all[0][0,:].mean()],color='C0', linestyle='--', alpha=.7)
ax.add_line(mean_error_wo)
mean_error_wf = mlines.Line2D([xmin,xmax], [errors_all[0][1,:].mean(),errors_all[0][1,:].mean()],color='C1', linestyle='--', alpha=.7)
ax.add_line(mean_error_wf)
#plt.title("Error as a function of cycle period")
plt.legend(["open-loop",'close-loop'])
plt.xlabel("cycle period (s)")
plt.ylabel("error (rads)")
plt.tick_params(axis='y', rotation=45) # Set rotation for yticks
plt.savefig('./results/P_I/exp1.png')
plt.show()
## with phys
phys_ol_avg_error = np.array([22.6366, 13.8909, 18.2556, 23.2015, 23.1718, 21.5603, 21.6143, 22.1547, 18.6581, 18.2983])*np.pi/180
phys_cl_avg_error = np.array([17.1090, 11.9084, 10.3141, 10.1112, 9.7079, 9.2170, 9.2056, 9.5024, 9.3722, 9.7379])*np.pi/180
plt.figure(figsize=(10, 5))
plt.plot(np.linspace(.5,10,trial_number), errors_all[0][0,:], linewidth=3.0, color='cornflowerblue', marker='.', alpha=.6)
plt.plot(np.linspace(.5,10,trial_number), errors_all[0][1,:], linewidth=3.0, color='orange', marker='.', alpha=.4)
plt.plot(np.linspace(1,10,10), phys_ol_avg_error, linewidth=3.0, color='royalblue', marker='.', alpha=.8)
plt.plot( np.linspace(1,10,10), phys_cl_avg_error, linewidth=3.0, color='darkorange', marker='.', alpha=.8)
plt.ylim(0,.65)
ax = plt.gca()
xmin, xmax = ax.get_xbound()
mean_error_wo = mlines.Line2D([xmin,xmax], [errors_all[0][0,:].mean(),errors_all[0][0,:].mean()],color='cornflowerblue', linestyle='--', alpha=.6)
ax.add_line(mean_error_wo)
mean_error_wf = mlines.Line2D([xmin,xmax], [errors_all[0][1,:].mean(),errors_all[0][1,:].mean()],color='orange', linestyle='--', alpha=.4)
ax.add_line(mean_error_wf)
mean_error_phys_wo = mlines.Line2D([xmin,xmax], [phys_ol_avg_error.mean(),phys_ol_avg_error.mean()],color='royalblue', linestyle='--', alpha=.8)
ax.add_line(mean_error_phys_wo)
mean_error_phys_wf = mlines.Line2D([xmin,xmax], [phys_cl_avg_error.mean(),phys_cl_avg_error.mean()],color='darkorange', linestyle='--', alpha=.8)
ax.add_line(mean_error_phys_wf)
#plt.title("Error as a function of cycle period")
plt.legend(["open-loop (sim)",'close-loop (sim)','open-loop (phys)','close-loop (phys)'], fontsize='small')
plt.subplots_adjust(bottom = .13, top= .95)
plt.xlabel("cycle period (s)")
plt.ylabel("error (rads)")
plt.tick_params(axis='y', rotation=45) # Set rotation for yticks
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
plt.savefig('./results/P_I/exp1_plus.pdf')
plt.show()
#plt 2: 50 cyclical
if experiments_switch[1]:
plt.figure(figsize=(10, 6))
plt.plot(range(errors_all[1][0,:].shape[0]), errors_all[1][0,:], range(errors_all[1][0,:].shape[0]), errors_all[1][1,:], marker='.')
plt.ylim(0,.65)
ax = plt.gca()
xmin, xmax = ax.get_xbound()
mean_error_wo = mlines.Line2D([xmin,xmax], [errors_all[1][0,:].mean(),errors_all[1][0,:].mean()],color='C0', linestyle='--', alpha=.7)
ax.add_line(mean_error_wo)
mean_error_wf = mlines.Line2D([xmin,xmax], [errors_all[1][1,:].mean(),errors_all[1][1,:].mean()],color='C1', linestyle='--', alpha=.7)
ax.add_line(mean_error_wf)
#plt.title("Error values over a set of cyclical tasks")
plt.legend(["open-loop",'close-loop'])
plt.xlabel("trial #")
plt.ylabel("error (rads)")
plt.tick_params(axis='y', rotation=45) # Set rotation for yticks
plt.savefig('./results/P_I/exp2.png')
plt.show()
#plt 3: 50 p2p
if experiments_switch[2]:
plt.figure(figsize=(10, 6))
plt.plot(range(errors_all[2][0,:].shape[0]), errors_all[2][0,:], range(errors_all[2][0,:].shape[0]), errors_all[2][1,:], marker='.')
plt.ylim(0,.65)
ax = plt.gca()
xmin, xmax = ax.get_xbound()
mean_error_wo = mlines.Line2D([xmin,xmax], [errors_all[2][0,:].mean(),errors_all[2][0,:].mean()],color='C0', linestyle='--', alpha=.7)
ax.add_line(mean_error_wo)
mean_error_wf = mlines.Line2D([xmin,xmax], [errors_all[2][1,:].mean(),errors_all[2][1,:].mean()],color='C1', linestyle='--', alpha=.7)
ax.add_line(mean_error_wf)
#plt.title("Error values over a set of point-to-point tasks")
plt.legend(["open-loop",'close-loop'])
plt.xlabel("trial #")
plt.ylabel("error (rads)")
plt.tick_params(axis='y', rotation=45) # Set rotation for yticks
plt.savefig('./results/P_I/exp3.png')
plt.show()
# plt 4: compare all
if experiments_switch[3]:
plt.figure(figsize=(10, 6))
t_plot = | np.linspace(1,5,5) | numpy.linspace |
#!/usr/bin/env python
from __future__ import print_function
import ctypes
from functools import partial
from collections import namedtuple
import sys
if sys.version_info[0] < 3:
from collections import Sequence
else:
from collections.abc import Sequence
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests, unittest
def is_numeric(dtype):
return np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.floating)
def get_limits(dtype):
if not is_numeric(dtype):
return None, None
if np.issubdtype(dtype, np.integer):
info = np.iinfo(dtype)
else:
info = np.finfo(dtype)
return info.min, info.max
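# Hedged illustration of the helpers above (exact float bounds depend on the
# local numpy build):
#   get_limits(np.uint8)   -> (0, 255)
#   get_limits(np.float32) -> (roughly -3.4e38, 3.4e38)
#   get_limits(np.str_)    -> (None, None)   # not a numeric dtype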
def get_conversion_error_msg(value, expected, actual):
return 'Conversion "{}" of type "{}" failed\nExpected: "{}" vs Actual "{}"'.format(
value, type(value).__name__, expected, actual
)
def get_no_exception_msg(value):
return 'Exception is not risen for {} of type {}'.format(value, type(value).__name__)
class Bindings(NewOpenCVTests):
def test_inheritance(self):
bm = cv.StereoBM_create()
bm.getPreFilterCap() # from StereoBM
bm.getBlockSize() # from SteroMatcher
boost = cv.ml.Boost_create()
boost.getBoostType() # from ml::Boost
boost.getMaxDepth() # from ml::DTrees
boost.isClassifier() # from ml::StatModel
def test_raiseGeneralException(self):
with self.assertRaises((cv.error,),
msg='C++ exception is not propagated to Python in the right way') as cm:
cv.utils.testRaiseGeneralException()
self.assertEqual(str(cm.exception), 'exception text')
def test_redirectError(self):
try:
cv.imshow("", None) # This causes an assert
self.assertEqual("Dead code", 0)
except cv.error as _e:
pass
handler_called = [False]
def test_error_handler(status, func_name, err_msg, file_name, line):
handler_called[0] = True
cv.redirectError(test_error_handler)
try:
cv.imshow("", None) # This causes an assert
self.assertEqual("Dead code", 0)
except cv.error as _e:
self.assertEqual(handler_called[0], True)
pass
cv.redirectError(None)
try:
cv.imshow("", None) # This causes an assert
self.assertEqual("Dead code", 0)
except cv.error as _e:
pass
def test_overload_resolution_can_choose_correct_overload(self):
val = 123
point = (51, 165)
self.assertEqual(cv.utils.testOverloadResolution(val, point),
'overload (int={}, point=(x={}, y={}))'.format(val, *point),
"Can't select first overload if all arguments are provided as positional")
self.assertEqual(cv.utils.testOverloadResolution(val, point=point),
'overload (int={}, point=(x={}, y={}))'.format(val, *point),
"Can't select first overload if one of the arguments are provided as keyword")
self.assertEqual(cv.utils.testOverloadResolution(val),
'overload (int={}, point=(x=42, y=24))'.format(val),
"Can't select first overload if one of the arguments has default value")
rect = (1, 5, 10, 23)
self.assertEqual(cv.utils.testOverloadResolution(rect),
'overload (rect=(x={}, y={}, w={}, h={}))'.format(*rect),
"Can't select second overload if all arguments are provided")
def test_overload_resolution_fails(self):
def test_overload_resolution(msg, *args, **kwargs):
no_exception_msg = 'Overload resolution failed without any exception for: "{}"'.format(msg)
wrong_exception_msg = 'Overload resolution failed with wrong exception type for: "{}"'.format(msg)
with self.assertRaises((cv.error, Exception), msg=no_exception_msg) as cm:
res = cv.utils.testOverloadResolution(*args, **kwargs)
self.fail("Unexpected result for {}: '{}'".format(msg, res))
self.assertEqual(type(cm.exception), cv.error, wrong_exception_msg)
test_overload_resolution('wrong second arg type (keyword arg)', 5, point=(1, 2, 3))
test_overload_resolution('wrong second arg type', 5, 2)
test_overload_resolution('wrong first arg', 3.4, (12, 21))
test_overload_resolution('wrong first arg, no second arg', 4.5)
test_overload_resolution('wrong args number for first overload', 3, (12, 21), 123)
test_overload_resolution('wrong args number for second overload', (3, 12, 12, 1), (12, 21))
# One of the common problems
test_overload_resolution('rect with float coordinates', (4.5, 4, 2, 1))
test_overload_resolution('rect with wrong number of coordinates', (4, 4, 1))
class Arguments(NewOpenCVTests):
def _try_to_convert(self, conversion, value):
try:
result = conversion(value).lower()
except Exception as e:
self.fail(
'{} "{}" is risen for conversion {} of type {}'.format(
type(e).__name__, e, value, type(value).__name__
)
)
else:
return result
def test_InputArray(self):
res1 = cv.utils.dumpInputArray(None)
# self.assertEqual(res1, "InputArray: noArray()") # not supported
self.assertEqual(res1, "InputArray: empty()=true kind=0x00010000 flags=0x01010000 total(-1)=0 dims(-1)=0 size(-1)=0x0 type(-1)=CV_8UC1")
res2_1 = cv.utils.dumpInputArray((1, 2))
self.assertEqual(res2_1, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=2 dims(-1)=2 size(-1)=1x2 type(-1)=CV_64FC1")
res2_2 = cv.utils.dumpInputArray(1.5) # Scalar(1.5, 1.5, 1.5, 1.5)
self.assertEqual(res2_2, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=4 dims(-1)=2 size(-1)=1x4 type(-1)=CV_64FC1")
a = np.array([[1, 2], [3, 4], [5, 6]])
res3 = cv.utils.dumpInputArray(a) # 32SC1
self.assertEqual(res3, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=6 dims(-1)=2 size(-1)=2x3 type(-1)=CV_32SC1")
a = np.array([[[1, 2], [3, 4], [5, 6]]], dtype='f')
res4 = cv.utils.dumpInputArray(a) # 32FC2
self.assertEqual(res4, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=3 dims(-1)=2 size(-1)=3x1 type(-1)=CV_32FC2")
a = np.array([[[1, 2]], [[3, 4]], [[5, 6]]], dtype=float)
res5 = cv.utils.dumpInputArray(a) # 64FC2
self.assertEqual(res5, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=3 dims(-1)=2 size(-1)=1x3 type(-1)=CV_64FC2")
a = np.zeros((2,3,4), dtype='f')
res6 = cv.utils.dumpInputArray(a)
self.assertEqual(res6, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=6 dims(-1)=2 size(-1)=3x2 type(-1)=CV_32FC4")
a = np.zeros((2,3,4,5), dtype='f')
res7 = cv.utils.dumpInputArray(a)
self.assertEqual(res7, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=120 dims(-1)=4 size(-1)=[2 3 4 5] type(-1)=CV_32FC1")
def test_InputArrayOfArrays(self):
res1 = cv.utils.dumpInputArrayOfArrays(None)
# self.assertEqual(res1, "InputArray: noArray()") # not supported
self.assertEqual(res1, "InputArrayOfArrays: empty()=true kind=0x00050000 flags=0x01050000 total(-1)=0 dims(-1)=1 size(-1)=0x0")
res2_1 = cv.utils.dumpInputArrayOfArrays((1, 2)) # { Scalar:all(1), Scalar::all(2) }
self.assertEqual(res2_1, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=2 dims(-1)=1 size(-1)=2x1 type(0)=CV_64FC1 dims(0)=2 size(0)=1x4")
res2_2 = cv.utils.dumpInputArrayOfArrays([1.5])
self.assertEqual(res2_2, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=1 dims(-1)=1 size(-1)=1x1 type(0)=CV_64FC1 dims(0)=2 size(0)=1x4")
a = np.array([[1, 2], [3, 4], [5, 6]])
b = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
res3 = cv.utils.dumpInputArrayOfArrays([a, b])
self.assertEqual(res3, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=2 dims(-1)=1 size(-1)=2x1 type(0)=CV_32SC1 dims(0)=2 size(0)=2x3")
c = np.array([[[1, 2], [3, 4], [5, 6]]], dtype='f')
res4 = cv.utils.dumpInputArrayOfArrays([c, a, b])
self.assertEqual(res4, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=3 dims(-1)=1 size(-1)=3x1 type(0)=CV_32FC2 dims(0)=2 size(0)=3x1")
a = np.zeros((2,3,4), dtype='f')
res5 = cv.utils.dumpInputArrayOfArrays([a, b])
self.assertEqual(res5, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=2 dims(-1)=1 size(-1)=2x1 type(0)=CV_32FC4 dims(0)=2 size(0)=3x2")
# TODO: fix conversion error
#a = np.zeros((2,3,4,5), dtype='f')
#res6 = cv.utils.dumpInputArray([a, b])
#self.assertEqual(res6, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=2 dims(-1)=1 size(-1)=2x1 type(0)=CV_32FC1 dims(0)=4 size(0)=[2 3 4 5]")
def test_20968(self):
pixel = np.uint8([[[40, 50, 200]]])
_ = cv.cvtColor(pixel, cv.COLOR_RGB2BGR) # should not raise exception
def test_parse_to_bool_convertible(self):
try_to_convert = partial(self._try_to_convert, cv.utils.dumpBool)
for convertible_true in (True, 1, 64, np.bool(1), np.int8(123), np.int16(11), np.int32(2),
np.int64(1), np.bool_(3), np.bool8(12)):
actual = try_to_convert(convertible_true)
self.assertEqual('bool: true', actual,
msg=get_conversion_error_msg(convertible_true, 'bool: true', actual))
for convertible_false in (False, 0, | np.uint8(0) | numpy.uint8 |
import numpy as np
import nibabel as nib
import copy
from eisen.transforms.imaging import CreateConstantFlags
from eisen.transforms.imaging import RenameFields
from eisen.transforms.imaging import FilterFields
from eisen.transforms.imaging import ResampleNiftiVolumes
from eisen.transforms.imaging import NiftiToNumpy
from eisen.transforms.imaging import NumpyToNifti
from eisen.transforms.imaging import CropCenteredSubVolumes
from eisen.transforms.imaging import MapValues
class TestCreateConstantFlags:
def setup_class(self):
self.data = {
'image': np.random.rand(32, 32, 3),
'label': 1
}
self.tform_one = CreateConstantFlags(['flag1', 'flag2'], [32.2, 42.0])
self.tform_two = CreateConstantFlags(['flag3', 'flag4', 'flag5'], ['flag3', 42, False])
def test_call(self):
self.data = self.tform_one(self.data)
assert 'flag1' in self.data.keys()
assert 'flag2' in self.data.keys()
assert self.data['flag1'] == 32.2
assert self.data['flag2'] == 42.0
self.data = self.tform_two(self.data)
assert 'flag3' in self.data.keys()
assert 'flag4' in self.data.keys()
assert 'flag5' in self.data.keys()
assert self.data['flag3'] == 'flag3'
assert self.data['flag4'] == 42
assert self.data['flag5'] is False
class TestRenameFields:
def setup_class(self):
self.data = {
'image': np.ones([32, 32, 3], dtype=np.float32),
'label': 0
}
self.tform_one = RenameFields(['image', 'label'], ['new_image', 'new_label'])
self.tform_two = RenameFields(['new_image'], ['image'])
def test_call(self):
self.data = self.tform_one(self.data)
assert 'new_image' in self.data.keys()
assert 'new_label' in self.data.keys()
assert 'image' not in self.data.keys()
assert 'label' not in self.data.keys()
assert np.all(self.data['new_image'] == 1)
assert self.data['new_label'] == 0
self.data = self.tform_two(self.data)
assert 'new_image' not in self.data.keys()
assert 'image' in self.data.keys()
assert np.all(self.data['image'] == 1)
class TestFilterFields:
def setup_class(self):
self.data = {
'image': np.ones([32, 32, 3], dtype=np.float32),
'label': 0
}
self.tform_one = FilterFields(['image', 'label'])
self.tform_two = FilterFields(['image'])
def test_call(self):
self.data = self.tform_one(self.data)
assert 'image' in self.data.keys()
assert 'label' in self.data.keys()
assert np.all(self.data['image'] == 1)
assert self.data['label'] == 0
self.data = self.tform_two(self.data)
assert 'label' not in self.data.keys()
assert 'image' in self.data.keys()
assert np.all(self.data['image'] == 1)
class TestResampleNiftiVolumes:
def setup_class(self):
data = np.ones([32, 32, 32]).astype(np.float32)
data = data * np.asarray(range(32))
img = nib.Nifti1Image(data, np.eye(4))
self.data = {
'image': img,
'label': 0
}
self.tform_one = ResampleNiftiVolumes(['image'], [0.5, 0.5, 0.5], interpolation='linear')
self.tform_two = ResampleNiftiVolumes(['image'], [1.0, 1.0, 1.0], interpolation='linear')
self.tform_three = ResampleNiftiVolumes(['image'], [2.0, 2.0, 2.0], interpolation='linear')
def test_call(self):
self.data = self.tform_one(self.data)
assert 'image' in self.data.keys()
assert 'label' in self.data.keys()
assert self.data['image'].shape[0] == 63
assert self.data['image'].shape[1] == 63
assert self.data['image'].shape[2] == 63
dta = np.asanyarray(self.data['image'].dataobj)
assert np.max(dta) == 31
assert np.min(dta) == 0
assert np.all(dta[0, 0, :] == np.arange(0, 31.5, step=0.5))
self.data = self.tform_two(self.data)
assert 'image' in self.data.keys()
assert 'label' in self.data.keys()
assert self.data['image'].shape[0] == 32
assert self.data['image'].shape[1] == 32
assert self.data['image'].shape[2] == 32
dta = np.asanyarray(self.data['image'].dataobj)
assert | np.max(dta) | numpy.max |
import cv2
import numpy as np
import os
import argparse
import imageio
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation
from camera_calib.robonet_calibration import display_annotation
tip_coord = []
use_for_calibration = ["berkeley_sawyer_traj973", "berkeley_sawyer_traj5214", "berkeley_sawyer_traj2909"]
SCALE = 4 # how much larger to display the image
VISUAL_REPROJ = True
def click_and_crop(event, x, y, flags, param):
# grab references to the global variables
global tip_coord
# if the left mouse button was clicked, record the starting
# (x, y) coordinates and indicate that cropping is being
# performed
if event == cv2.EVENT_LBUTTONDOWN:
tip_coord = [x, y]
def annotate_img(img):
go_back = False
is_fail = False
cv2.namedWindow("image")
cv2.setMouseCallback("image", click_and_crop)
while True:
# display the image and wait for a keypress
cv2.imshow("image", img[:, :, ::-1])
key = cv2.waitKey(1) & 0xFF
# if the 'c' key is pressed, break from the loop
if key == 32: # space
break
elif key == ord("g"):
is_fail = False
elif key == ord("f"):
is_fail = True
break
elif key == ord("r"):
go_back = True
break
cv2.destroyAllWindows()
return go_back, is_fail
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="calibrate specific robot and viewpoint"
)
parser.add_argument("robot", type=str, help="robot")
parser.add_argument("viewpoint", type=str, help="viewpoint")
parser.add_argument("num_trajectories", type=int, default=3, help="number of trajectories used for calibration")
parser.add_argument(
"--direct_calibrate", action="store_true", help="directly calibrate if annotation was done"
)
parser.add_argument(
"--visual_distribution", action="store_true", help="visualize the distribution of eef"
)
args = parser.parse_args()
target_dir = args.robot + "/" + args.viewpoint
if not args.direct_calibrate:
all_pixel_coords = []
all_3d_pos = []
num_annotated = 0
# TODO: change what experiment to load
for exp_id in use_for_calibration:
states = np.load(target_dir + "/states_" + exp_id + ".npy")
labels = []
temp_states = []
gif = imageio.get_reader(target_dir + "/" + exp_id + ".gif")
t = 0
for img in gif:
img = img[:, :, :3]
print(img.shape)
img = cv2.resize(
img, (img.shape[1] * SCALE, img.shape[0] * SCALE))
go_back, is_fail = annotate_img(img)
if not is_fail:
x = tip_coord[0] / SCALE
y = tip_coord[1] / SCALE
display_annotation(img, [x, y])
temp_states.append(states[t])
labels.append([x, y])
print(labels[-1])
else:
print("skip label")
num_annotated += 1
print("Annotated", num_annotated)
t += 1
all_pixel_coords.extend(labels) # |exp * T| x 2
all_3d_pos.extend(temp_states) # |exp * T| x 3
all_pixel_coords = np.array(all_pixel_coords)
all_3d_pos = np.array(all_3d_pos)
np.save(target_dir + "/all_pixel_coords", all_pixel_coords)
np.save(target_dir + "/all_3d_pos", all_3d_pos)
print("Congrats, you're done with this one!")
else:
all_pixel_coords = np.load(target_dir + "/all_pixel_coords.npy")
all_3d_pos = np.load(target_dir + "/all_3d_pos.npy")
print("pixel coords shape", all_pixel_coords.shape)
print("loaded 3d pos shape", all_3d_pos.shape)
# calibration section starts here
all_3d_pos = np.array(all_3d_pos[:, 0:3])
print("3d pos shape", all_3d_pos.shape)
all_pixel_coords = np.array(all_pixel_coords, dtype=np.float32)
intrinsic_guess = np.array([[320.75, 0, 160],
[0, 320.75, 120],
[0, 0, 1]])
img_shape = (240, 320)
flags = cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_FIX_PRINCIPAL_POINT + cv2.CALIB_FIX_FOCAL_LENGTH
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
[all_3d_pos], [all_pixel_coords],
img_shape, intrinsic_guess, None, flags=flags)
print("calibrated camera intrinsic:\n", mtx)
r = Rotation.from_rotvec(rvecs[0].reshape(-1))
ext_R = r.as_matrix()
ext = np.column_stack((ext_R, tvecs[0]))
full_ext = np.row_stack((ext, [0, 0, 0, 1]))
print("calibrated camera extrinsic:\n", full_ext)
projM = mtx @ full_ext[:3]
print("calibrated projection matrix:\n", projM)
cameraTworld = np.linalg.inv(full_ext)
print("calibrated camera to world transformation:\n", cameraTworld)
print("camera 3d position:\n", cameraTworld[:3, 3])
R_cTw = cameraTworld[0:3]
R_cTw = R_cTw[:, :3]
r = Rotation.from_matrix(R_cTw)
camera_orient = r.as_quat()
print("camera orientation (quarternion):\n", camera_orient)
if args.visual_distribution:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(all_3d_pos[:, 0], all_3d_pos[:, 1], all_3d_pos[:, 2])
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
if VISUAL_REPROJ:
for exp_id in use_for_calibration:
gif = imageio.get_reader(target_dir + "/" + exp_id + ".gif")
t = 0
for img in gif:
img = img[:, :, :3]
img = cv2.resize(
img, (img.shape[1] * SCALE, img.shape[0] * SCALE))
states = np.load(target_dir + "/states_" + exp_id + ".npy")
state = states[t, :3]
state = np.concatenate([state, [1]])
print("state:", state)
pix_3d = projM @ state
pix_2d = | np.array([pix_3d[0] / pix_3d[2], pix_3d[1] / pix_3d[2]]) | numpy.array |
from osgeo import gdal
import numpy as np
import DataHandler.Raster.RasterUtils as rast_utils
import Services.FileUtils as file_utils
class RasterAnalysis:
"""
    A class to record the changes taking place between two time series steps
"""
@staticmethod
def raster_changes_matrix(rast1, rast2, output_csv):
"""
        Present a change value matrix in CSV format.
        For each raster we get the distinct pixel values
        and we map changes from raster1 to raster2 into a 2d matrix.
        :param rast1: the first (earlier) raster image
        :param rast2: the second (later) raster image
        :param output_csv: the path to save the 2d changes matrix in CSV format
"""
raster_arr1 = rast_utils.RasterUtils.get_raster_array_from_image(rast1, 1)
unique_vals1 = np.unique(raster_arr1)
raster_arr2 = rast_utils.RasterUtils.get_raster_array_from_image(rast2, 1)
unique_vals2 = np.unique(raster_arr2)
# create the matrix full filled with zeros
matrix = np.zeros((len(unique_vals1), len(unique_vals2)))
# populate the matrix with counts of each unique value (change from --> to)
        print('len(raster_arr1)' + str(len(raster_arr1)))
        print('len(raster_arr2)' + str(len(raster_arr2)))
for y in range(len(raster_arr1)):
for x in range(len(raster_arr1[y])):
s1 = | np.where(unique_vals1 == raster_arr1[y][x]) | numpy.where |
import unittest
import numpy as np
from scipy.spatial.transform import Rotation
from xrd_simulator import templates, utils
class TestUtils(unittest.TestCase):
def setUp(self):
np.random.seed(5) # changes all randomization in the test
def test_s3dxrd(self):
parameters = {
"detector_distance": 191023.9164,
"detector_center_pixel_z": 256.2345,
"detector_center_pixel_y": 255.1129,
"pixel_side_length_z": 181.4234,
"pixel_side_length_y": 180.2343,
"number_of_detector_pixels_z": 512,
"number_of_detector_pixels_y": 512,
"wavelength": 0.285227,
"beam_side_length_z": 512 * 200.,
"beam_side_length_y": 512 * 200.,
"rotation_step": np.radians(1.634),
"rotation_axis": np.array([0., 0., 1.0])
}
beam, detector, motion = templates.s3dxrd(parameters)
for ci in beam.centroid:
self.assertAlmostEqual(ci, 0, msg="beam not at origin.")
det_approx_centroid = detector.det_corner_0.copy()
det_approx_centroid[1] += detector.det_corner_1[1]
det_approx_centroid[2] += detector.det_corner_2[2]
self.assertAlmostEqual(
det_approx_centroid[0],
parameters["detector_distance"],
msg="Detector distance wrong.")
self.assertLessEqual(
np.abs(
det_approx_centroid[1]),
5 * parameters["pixel_side_length_y"],
msg="Detector not centered.")
self.assertLessEqual(
np.abs(
det_approx_centroid[2]),
5 * parameters["pixel_side_length_z"],
msg="Detector not centered.")
original_vector = np.random.rand(3,) - 0.5
time = 0.234986
transformed_vector = motion(original_vector, time)
angle = parameters["rotation_step"] * time
s, c = np.sin(angle), np.cos(angle)
Rz = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
self.assertAlmostEqual(transformed_vector[0], np.dot(Rz, original_vector)[
0], msg="Motion does not rotate around z-axis")
self.assertAlmostEqual(transformed_vector[1], np.dot(Rz, original_vector)[
1], msg="Motion does not rotate around z-axis")
self.assertAlmostEqual(
transformed_vector[2],
original_vector[2],
msg="Motion does not rotate around z-axis")
def test_polycrystal_from_odf(self):
unit_cell = [4.926, 4.926, 5.4189, 90., 90., 120.]
sgname = 'P3221' # Quartz
def orientation_density_function(
x, q): return 1. / (np.pi**2) # uniform ODF
number_of_crystals = 500
sample_bounding_cylinder_height = 50
sample_bounding_cylinder_radius = 25
maximum_sampling_bin_seperation = np.radians(10.0)
# Linear strain gradient along rotation axis.
def strain_tensor(x): return np.array(
[[0, 0, 0.02 * x[2] / sample_bounding_cylinder_height], [0, 0, 0], [0, 0, 0]])
polycrystal = templates.polycrystal_from_odf(
orientation_density_function,
number_of_crystals,
sample_bounding_cylinder_height,
sample_bounding_cylinder_radius,
unit_cell,
sgname,
path_to_cif_file=None,
maximum_sampling_bin_seperation=maximum_sampling_bin_seperation,
strain_tensor=strain_tensor)
# Compare Euler angle distributions to scipy random uniform orientation
# sampler
euler1 = np.array([Rotation.from_matrix(U).as_euler(
'xyz', degrees=True) for U in polycrystal.orientation_lab])
euler2 = Rotation.random(10 * euler1.shape[0]).as_euler('xyz')
for i in range(3):
hist1, bins = np.histogram(euler1[:, i])
hist2, bins = np.histogram(euler2[:, i])
hist2 = hist2 / 10.
# These histograms should look roughly the same
self.assertLessEqual(
np.max(
np.abs(
hist1 -
hist2)),
number_of_crystals *
0.05,
"ODF not sampled correctly.")
parameters = {
"detector_distance": 191023.9164,
"detector_center_pixel_z": 256.2345,
"detector_center_pixel_y": 255.1129,
"pixel_side_length_z": 181.4234,
"pixel_side_length_y": 180.2343,
"number_of_detector_pixels_z": 512,
"number_of_detector_pixels_y": 512,
"wavelength": 0.285227,
"beam_side_length_z": 512 * 200.,
"beam_side_length_y": 512 * 200.,
"rotation_step": np.radians(20.0),
"rotation_axis": np.array([0., 0., 1.0])
}
beam, detector, motion = templates.s3dxrd(parameters)
number_of_crystals = 100
sample_bounding_cylinder_height = 256 * 180 / 128.
sample_bounding_cylinder_radius = 256 * 180 / 128.
polycrystal = templates.polycrystal_from_odf(
orientation_density_function,
number_of_crystals,
sample_bounding_cylinder_height,
sample_bounding_cylinder_radius,
unit_cell,
sgname,
path_to_cif_file=None,
maximum_sampling_bin_seperation=maximum_sampling_bin_seperation,
strain_tensor=strain_tensor)
polycrystal.transform(motion, time=0.134)
polycrystal.diffract(
beam,
detector,
motion,
min_bragg_angle=0,
max_bragg_angle=None,
verbose=True)
diffraction_pattern = detector.render(
frame_number=0,
lorentz=False,
polarization=False,
structure_factor=False,
method="centroid",
verbose=True)
bins, histogram = utils._diffractogram(
diffraction_pattern, parameters['detector_center_pixel_z'], parameters['detector_center_pixel_y'])
histogram[histogram < 0.5 * np.median(histogram)] = 0
csequence, nosequences = 0, 0
for i in range(histogram.shape[0]):
if histogram[i] > 0:
csequence += 1
elif csequence >= 1:
nosequences += 1
csequence = 0
self.assertGreaterEqual(
nosequences,
10,
msg="Few or no rings appeared from diffraction.")
def test_get_uniform_powder_sample(self):
sample_bounding_radius = 256 * 180 / 128.
polycrystal = templates.get_uniform_powder_sample(
sample_bounding_radius,
number_of_grains=50,
unit_cell=[4.926, 4.926, 5.4189, 90., 90., 120.],
sgname='P3221',
strain_tensor= | np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0.01]]) | numpy.array |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import List, Optional, Union
import numpy as np
import numpy.typing as npt
import scipy.stats as st
def pareto_efficiency_mask(X: npt.NDArray[np.float32]) -> np.ndarray:
"""
Evaluates for each allocation in the provided array whether it is Pareto efficient. The costs
are assumed to be improved by lowering them.
Args:
X: Array of shape [N, D] (N: number of allocations, D: number of costs) containing the
allocations to check.
Returns:
A boolean array of shape [N], indicating for each allocation whether it is Pareto
efficient.
"""
# First, we assume that all allocations are Pareto efficient, i.e. not dominated
mask = np.ones(X.shape[0], dtype=bool)
# Then, we iterate over all allocations A and check which are dominated by then current
# allocation A. If it is, we don't need to check it against another allocation.
for i, allocation in enumerate(X):
# Only consider allocation if it hasn't been dominated yet
if mask[i]:
# An allocation is dominated by A if all costs are equal or lower and at least one cost
# is strictly lower. Using that definition, A cannot be dominated by itself.
dominated = np.all(allocation <= X[mask], axis=1) * np.any(
allocation < X[mask], axis=1
)
mask[mask] = ~dominated
return mask
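# Hedged usage sketch (the costs below are made up): with two objectives that
# are both minimized, the allocation (1, 3) dominates (2, 4), so only rows
# 0, 2 and 3 are Pareto efficient.
#
#   costs_demo = np.array([[1.0, 3.0],
#                          [2.0, 4.0],   # dominated by (1, 3)
#                          [0.5, 5.0],
#                          [3.0, 1.0]])
#   pareto_efficiency_mask(costs_demo)  # -> array([ True, False,  True,  True])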
def epsilon_net_indices(
X: npt.NDArray[np.float32], dim: Optional[int] = None
) -> np.ndarray:
"""
Outputs an order of the items in the provided array such that the items are spaced well. This
means that after choosing a seed item, the next item is chosen to be the farthest from the seed
item. The third item is then chosen to maximize the distance to the existing points and so on.
This algorithm is taken from "Nearest-Neighbor Searching and Metric Space Dimensions"
(Clarkson, 2005, p.17).
Args:
X: Array of shape [N, D] (N: number of items, D: dimensionality) with the items to
sparsify.
dim: The index of the dimension which to use to choose the seed item. If `None`, an item is
chosen at random, otherwise the item with the lowest value in the specified dimension
is used.
Returns:
An array of shape [N] providing the item indices that define a sparsified order of the
items.
"""
indices = set(range(X.shape[0]))
# Choose the seed item according to dim
if dim is None:
seed = | np.random.choice(X.shape[0]) | numpy.random.choice |
import time
from pyscf import scf
import os, time
import numpy as np
from mldftdat.lowmem_analyzers import RHFAnalyzer, UHFAnalyzer
from mldftdat.workflow_utils import get_save_dir, SAVE_ROOT, load_mol_ids
from mldftdat.density import get_exchange_descriptors2, LDA_FACTOR, GG_AMIN
from mldftdat.data import get_unique_coord_indexes_spherical
import logging
import yaml
from argparse import ArgumentParser
"""
Script to compile a dataset from the CIDER DB for training a CIDER functional.
"""
def compile_dataset2(DATASET_NAME, MOL_IDS, SAVE_ROOT, CALC_TYPE, FUNCTIONAL, BASIS,
spherical_atom=False, locx=False, lam=0.5,
version='a', **gg_kwargs):
all_descriptor_data = None
all_rho_data = None
all_values = []
all_weights = []
cutoffs = []
if locx:
raise ValueError('locx setting not supported in this version! (but might be later)')
Analyzer = loc_analyzers.UHFAnalyzer if 'U' in CALC_TYPE \
else loc_analyzers.RHFAnalyzer
else:
Analyzer = UHFAnalyzer if 'U' in CALC_TYPE else RHFAnalyzer
for MOL_ID in MOL_IDS:
logging.info('Computing descriptors for {}'.format(MOL_ID))
data_dir = get_save_dir(SAVE_ROOT, CALC_TYPE, BASIS, MOL_ID, FUNCTIONAL)
start = time.monotonic()
analyzer = Analyzer.load(data_dir + '/data.hdf5')
analyzer.get_ao_rho_data()
if type(analyzer.calc) == scf.hf.RHF:
restricted = True
else:
restricted = False
end = time.monotonic()
logging.info('Analyzer load time {}'.format(end - start))
if spherical_atom:
start = time.monotonic()
indexes = get_unique_coord_indexes_spherical(analyzer.grid.coords)
end = time.monotonic()
logging.info('Index scanning time {}'.format(end - start))
start = time.monotonic()
if restricted:
descriptor_data = get_exchange_descriptors2(
analyzer, restricted=True, version=version,
**gg_kwargs
)
else:
descriptor_data_u, descriptor_data_d = \
get_exchange_descriptors2(
analyzer, restricted=False, version=version,
**gg_kwargs
)
descriptor_data = np.append(descriptor_data_u, descriptor_data_d,
axis = 1)
end = time.monotonic()
logging.info('Get descriptor time {}'.format(end - start))
if locx:
logging.info('Getting loc fx with lambda={}'.format(lam))
values = analyzer.get_loc_fx_energy_density(lam = lam, overwrite=True)
if not restricted:
values = 2 * np.append(analyzer.loc_fx_energy_density_u,
analyzer.loc_fx_energy_density_d)
else:
values = analyzer.get_fx_energy_density()
if not restricted:
values = 2 * np.append(analyzer.fx_energy_density_u,
analyzer.fx_energy_density_d)
rho_data = analyzer.rho_data
if not restricted:
rho_data = 2 * np.append(rho_data[0], rho_data[1], axis=1)
if spherical_atom:
values = values[indexes]
descriptor_data = descriptor_data[:,indexes]
rho_data = rho_data[:,indexes]
weights = analyzer.grid.weights[indexes]
else:
weights = analyzer.grid.weights
if all_descriptor_data is None:
all_descriptor_data = descriptor_data
else:
all_descriptor_data = np.append(all_descriptor_data, descriptor_data,
axis = 1)
if all_rho_data is None:
all_rho_data = rho_data
else:
all_rho_data = np.append(all_rho_data, rho_data, axis=1)
all_values = np.append(all_values, values)
all_weights = np.append(all_weights, weights)
if not restricted:
# two copies for unrestricted case
all_weights = np.append(all_weights, weights)
cutoffs.append(all_values.shape[0])
DATASET_NAME = os.path.basename(DATASET_NAME)
save_dir = os.path.join(SAVE_ROOT, 'DATASETS',
FUNCTIONAL, BASIS, version, DATASET_NAME)
if not os.path.isdir(save_dir):
os.makedirs(save_dir, exist_ok=True)
rho_file = os.path.join(save_dir, 'rho.npy')
desc_file = os.path.join(save_dir, 'desc.npy')
val_file = os.path.join(save_dir, 'val.npy')
wt_file = os.path.join(save_dir, 'wt.npy')
cut_file = os.path.join(save_dir, 'cut.npy')
np.save(rho_file, all_rho_data)
np.save(desc_file, all_descriptor_data)
np.save(val_file, all_values)
np.save(wt_file, all_weights)
np.save(cut_file, | np.array(cutoffs) | numpy.array |
import random
import h5py
import numpy as np
import tensorflow as tf
import Preproc
import Layer
import Net
def loadHDF5():
with h5py.File('CIFAR10.h5', 'r') as f:
dataTrain = np.array(f['Train']['images'])
labelsTrain = np.array(f['Train']['labels'])
dataTest = | np.array(f['Test']['images']) | numpy.array |
import numpy as np
from scipy.constants import c, e, m_p
from .machines import Synchrotron
from .SPS import SPSOctupoles
class PSB(Synchrotron):
def __init__(self, *args, **kwargs):
if 'n_segments' not in list(kwargs.keys()):
raise ValueError('Number of segments must be specified')
if 'machine_configuration' not in list(kwargs.keys()):
raise ValueError('machine_configuration must be specified')
self.n_segments = kwargs['n_segments']
self.machine_configuration = kwargs['machine_configuration']
self.circumference = 50*np.pi
self.s = (np.arange(0, self.n_segments + 1)
* self.circumference / self.n_segments)
if self.machine_configuration == '160MeV':
self.charge = e
self.mass = m_p
self.gamma = 160e6*e/(self.mass*c**2) + 1
self.Q_x = 4.23
self.Q_y = 4.37
self.Qp_x = [-1*self.Q_x]
self.Qp_y = [-2*self.Q_y]
self.app_x = 0.0000e-9
self.app_y = 0.0000e-9
self.app_xy = 0
self.alpha_x = 0 * np.ones(self.n_segments + 1)
self.beta_x = self.circumference/(2*np.pi*self.Q_x) * np.ones(self.n_segments + 1)
self.D_x = 0 * np.ones(self.n_segments + 1)
self.alpha_y = 0 * np.ones(self.n_segments + 1)
self.beta_y = self.circumference/(2*np.pi*self.Q_y) * | np.ones(self.n_segments + 1) | numpy.ones |
import torch
import random
import functools
import numpy as np
import SimpleITK as sitk
from skimage.transform import resize
from skimage.morphology import label
import src.data.transforms
def compose(transforms=None):
"""Compose several transforms together.
Args:
transforms (Box): The preprocessing and augmentation techniques applied to the data (default: None, only contain the default transform ToTensor).
Returns:
transforms (Compose): The list of BaseTransform.
"""
if transforms is None:
return Compose([ToTensor()])
_transforms = []
for transform in transforms:
cls = getattr(src.data.transforms, transform.name)
kwargs = transform.get('kwargs')
_transforms.append(cls(**kwargs) if kwargs else cls())
transforms = Compose(_transforms)
return transforms
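# Illustrative usage sketch (not part of the original module). It assumes a Box-style
# config (e.g. the python-box package) in which each entry exposes `.name` and an
# optional `kwargs` mapping, matching how `compose` reads the transform list above.
# The transform names refer to classes defined below in this module; the arrays are
# hypothetical inputs.
#
# >>> from box import Box
# >>> cfg = Box({'transforms': [{'name': 'Resize', 'kwargs': {'size': [256, 256]}},
# ...                           {'name': 'Normalize'},
# ...                           {'name': 'ToTensor'}]})
# >>> transforms = compose(cfg.transforms)
# >>> img = np.random.rand(512, 512, 3)
# >>> label = np.random.randint(0, 2, (512, 512, 1)).astype(np.int64)
# >>> img_t, label_t = transforms(img, label, resize_orders=[1, 0],
# ...                             normalize_tags=[True, False],
# ...                             dtypes=[torch.float, torch.long])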
class BaseTransform:
"""The base class for all transforms.
"""
def __call__(self, *imgs, **kwargs):
raise NotImplementedError
def __repr__(self):
return self.__class__.__name__
class Compose(BaseTransform):
"""Compose several transforms together.
Args:
transforms (Box): The preprocessing and augmentation techniques applied to the data.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, *imgs, **kwargs):
"""
Args:
imgs (tuple of numpy.ndarray): The images to be transformed.
Returns:
imgs (tuple of torch.Tensor): The transformed images.
"""
for transform in self.transforms:
imgs = transform(*imgs, **kwargs)
# Returns the torch.Tensor instead of a tuple of torch.Tensor if there is only one image.
if len(imgs) == 1:
imgs = imgs[0]
return imgs
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class ToTensor(BaseTransform):
"""Convert a tuple of numpy.ndarray to a tuple of torch.Tensor.
"""
def __call__(self, *imgs, dtypes=None, **kwargs):
"""
Args:
imgs (tuple of numpy.ndarray): The images to be converted to tensor.
dtypes (sequence of torch.dtype, optional): The corresponding dtype of the images (default: None, transform all the images' dtype to torch.float).
Returns:
imgs (tuple of torch.Tensor): The converted images.
"""
if not all(isinstance(img, np.ndarray) for img in imgs):
raise TypeError('All of the images should be numpy.ndarray.')
if dtypes:
if not all(isinstance(dtype, torch.dtype) for dtype in dtypes):
raise TypeError('All of the dtypes should be torch.dtype.')
if len(dtypes) != len(imgs):
raise ValueError('The number of the dtypes should be the same as the images.')
imgs = tuple(img.to(dtype) for img, dtype in zip(map(torch.from_numpy, imgs), dtypes))
else:
imgs = tuple(img.float() for img in map(torch.from_numpy, imgs))
return imgs
class Normalize(BaseTransform):
"""Normalize a tuple of images with the means and the standard deviations.
Args:
means (list, optional): A sequence of means for each channel (default: None).
stds (list, optional): A sequence of standard deviations for each channel (default: None).
"""
def __init__(self, means=None, stds=None):
if means is None and stds is None:
pass
elif means is not None and stds is not None:
if len(means) != len(stds):
raise ValueError('The number of the means should be the same as the standard deviations.')
else:
raise ValueError('Both the means and the standard deviations should have values or None.')
self.means = means
self.stds = stds
def __call__(self, *imgs, normalize_tags=None, **kwargs):
"""
Args:
imgs (tuple of numpy.ndarray): The images to be normalized.
normalize_tags (sequence of bool, optional): The corresponding tags of the images (default: None, normalize all the images).
Returns:
imgs (tuple of numpy.ndarray): The normalized images.
"""
if not all(isinstance(img, np.ndarray) for img in imgs):
raise TypeError('All of the images should be numpy.ndarray.')
if normalize_tags:
if len(normalize_tags) != len(imgs):
raise ValueError('The number of the tags should be the same as the images.')
if not all(normalize_tag in [True, False] for normalize_tag in normalize_tags):
raise ValueError("All of the tags should be either True or False.")
else:
normalize_tags = [None] * len(imgs)
_imgs = []
for img, normalize_tag in zip(imgs, normalize_tags):
if normalize_tag is None or normalize_tag is True:
if self.means is None and self.stds is None: # Apply image-level normalization.
axis = tuple(range(img.ndim - 1))
means = img.mean(axis=axis)
stds = img.std(axis=axis)
img = self._normalize(img, means, stds)
else:
img = self._normalize(img, self.means, self.stds)
elif normalize_tag is False:
pass
_imgs.append(img)
imgs = tuple(_imgs)
return imgs
@staticmethod
def _normalize(img, means, stds):
"""Normalize the image with the means and the standard deviations.
Args:
img (numpy.ndarray): The image to be normalized.
means (list): A sequence of means for each channel.
stds (list): A sequence of standard deviations for each channel.
Returns:
img (numpy.ndarray): The normalized image.
"""
img = img.copy()
for c, mean, std in zip(range(img.shape[-1]), means, stds):
img[..., c] = (img[..., c] - mean) / (std + 1e-10)
return img
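# Illustrative sketch of the two normalization modes handled above (assumed example,
# not part of the original module): with no means/stds, statistics are computed per
# image over all but the channel axis; with fixed statistics, they are applied per channel.
#
# >>> img = np.random.rand(64, 64, 3).astype(np.float32)
# >>> label = np.random.randint(0, 2, (64, 64, 1)).astype(np.int64)
# >>> per_image = Normalize()
# >>> fixed = Normalize(means=[0.485, 0.456, 0.406], stds=[0.229, 0.224, 0.225])
# >>> img_n, label_n = per_image(img, label, normalize_tags=[True, False])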
class Resize(BaseTransform):
"""Resize a tuple of images to the same size.
Args:
size (list): The desired output size of the resized images.
"""
def __init__(self, size):
self.size = size
self._resize = functools.partial(resize, mode='constant', preserve_range=True)
def __call__(self, *imgs, resize_orders=None, **kwargs):
"""
Args:
imgs (tuple of numpy.ndarray): The images to be resized.
resize_orders (sequence of int, optional): The corresponding interpolation order of the images (default: None, the interpolation order would be 1 for all the images).
Returns:
imgs (tuple of numpy.ndarray): The resized images.
"""
if not all(isinstance(img, np.ndarray) for img in imgs):
raise TypeError('All of the images should be numpy.ndarray.')
ndim = imgs[0].ndim
if ndim - 1 != len(self.size):
raise ValueError(f'The dimensions of the resized size should be the same as the image ({ndim - 1}). Got {len(self.size)}')
if resize_orders:
imgs = tuple(self._resize(img, self.size, order).astype(img.dtype) for img, order in zip(imgs, resize_orders))
else:
imgs = tuple(self._resize(img, self.size) for img in imgs)
return imgs
class RandomCrop(BaseTransform):
"""Crop a tuple of images at the same random location.
Args:
size (list): The desired output size of the cropped images.
"""
def __init__(self, size):
self.size = size
def __call__(self, *imgs, **kwargs):
"""
Args:
imgs (tuple of numpy.ndarray): The images to be cropped.
Returns:
imgs (tuple of numpy.ndarray): The cropped images.
"""
if not all(isinstance(img, np.ndarray) for img in imgs):
raise TypeError('All of the images should be numpy.ndarray.')
if not all(img.ndim == 3 for img in imgs) and not all(img.ndim == 4 for img in imgs):
raise ValueError("All of the images' dimensions should be 3 (2D images) or 4 (3D images).")
ndim = imgs[0].ndim
if ndim - 1 != len(self.size):
raise ValueError(f'The dimensions of the cropped size should be the same as the image ({ndim - 1}). Got {len(self.size)}')
if ndim == 3:
h0, hn, w0, wn = self._get_coordinates(imgs[0], self.size)
imgs = tuple([img[h0:hn, w0:wn] for img in imgs])
elif ndim == 4:
h0, hn, w0, wn, d0, dn = self._get_coordinates(imgs[0], self.size)
imgs = tuple([img[h0:hn, w0:wn, d0:dn] for img in imgs])
return imgs
@staticmethod
def _get_coordinates(img, size):
"""Compute the coordinates of the cropped image.
Args:
img (numpy.ndarray): The image to be cropped.
size (list): The desired output size of the cropped image.
Returns:
coordinates (tuple): The coordinates of the cropped image.
"""
if any(i - j < 0 for i, j in zip(img.shape, size)):
raise ValueError(f'The image ({img.shape}) is smaller than the cropped size ({size}). Please use a smaller cropped size.')
if img.ndim == 3:
h, w = img.shape[:-1]
ht, wt = size
h0, w0 = random.randint(0, h - ht), random.randint(0, w - wt)
return h0, h0 + ht, w0, w0 + wt
elif img.ndim == 4:
h, w, d = img.shape[:-1]
ht, wt, dt = size
h0, w0, d0 = random.randint(0, h - ht), random.randint(0, w - wt), random.randint(0, d - dt)
return h0, h0 + ht, w0, w0 + wt, d0, d0 + dt
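# Illustrative sketch (assumed example, not part of the original module): the same
# random window is applied to every input, so an image and its label stay spatially
# aligned. The crop size excludes the channel axis.
#
# >>> crop = RandomCrop(size=[128, 128])
# >>> img = np.random.rand(240, 320, 3)
# >>> label = np.random.randint(0, 4, (240, 320, 1))
# >>> img_c, label_c = crop(img, label)   # both become (128, 128, C)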
class PositiveCrop(BaseTransform):
"""Set a probability (positive_sampling_rate) where we ensure the sampled images at least put a target pixel in the middle during training.
Args:
positive_sampling_rate (float): The probability to select the sample around the target pixel.
size (list): The desired output size of the cropped images.
"""
def __init__(self, positive_sampling_rate, size):
self.positive_sampling_rate = positive_sampling_rate
self.size = size
def __call__(self, *imgs, target=None, target_label=None, **kwargs):
"""
Args:
imgs (tuple of numpy.ndarray): The images to be cropped.
target (numpy.ndarray): The reference target to determine sampling area.
target_label (int): The target label of the target image which is needed to be focused.
Returns:
imgs (tuple of numpy.ndarray): The cropped images.
"""
if not all(isinstance(img, np.ndarray) for img in imgs):
raise TypeError('All of the images should be numpy.ndarray.')
if not all(img.ndim == 3 for img in imgs) and not all(img.ndim == 4 for img in imgs):
raise ValueError("All of the images' dimensions should be 3 (2D images) or 4 (3D images).")
if not all(target.shape == img.shape for img in imgs):
raise ValueError("All of the images' shapes should be same as the target image.")
ndim = imgs[0].ndim
if ndim - 1 != len(self.size):
raise ValueError(f'The dimensions of the cropped size should be the same as the image ({ndim - 1}). Got {len(self.size)}')
starts, ends = self._get_coordinates(target, target_label, self.positive_sampling_rate, self.size)
if ndim == 3:
imgs = tuple([img[starts[0]:ends[0]+1, starts[1]:ends[1]+1] for img in imgs])
elif ndim == 4:
imgs = tuple([img[starts[0]:ends[0]+1, starts[1]:ends[1]+1, starts[2]:ends[2]+1] for img in imgs])
return imgs
@staticmethod
def _get_coordinates(target, target_label, positive_sampling_rate, size):
"""Compute the coordinates of the cropped image.
Args:
target (numpy.ndarray): The referenced image.
target_label (int): The target label of the target image which is needed to be focused.
positive_sampling_rate (float): The probability to select the sample around the target pixel.
size (list): The desired output size of the cropped images.
Returns:
coordinates (tuple): The coordinates of the cropped image.
"""
if any(i - j < 0 for i, j in zip(target.shape, size)):
raise ValueError(f'The target image ({target.shape}) is smaller than the cropped size ({size}). Please use a smaller cropped size.')
sample_rate = random.uniform(0, 1)
target = target.squeeze(axis=-1)
starts, ends = [], []
if sample_rate <= positive_sampling_rate:
# sample a target object
target = np.where(target == target_label, 1, 0)
label_target = label(target, connectivity=target.ndim)
target_list = np.unique(label_target)[1:]
target_id = random.choice(target_list)
# sample a pixel from the selected target
positive_list = np.where(label_target == target_id)
positive_index = random.choice(range(len(positive_list[0])))
for i in range(target.ndim):
start = positive_list[i][positive_index] - size[i] // 2
start = max(0, start)
end = start + size[i] - 1
if end >= target.shape[i]:
end = target.shape[i] - 1
start = end - size[i] + 1
starts.append(start)
ends.append(end)
else:
for i in range(target.ndim):
start = random.randint(0, target.shape[i] - size[i])
end = start + size[i] - 1
starts.append(start)
ends.append(end)
return starts, ends
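# Illustrative sketch (assumed example, not part of the original module): with
# probability `positive_sampling_rate` the crop window is re-centred on a randomly
# chosen pixel of a randomly chosen connected component carrying `target_label`;
# otherwise a uniform random crop is taken. The target must have the same shape as
# every input and must contain at least one pixel of `target_label`.
#
# >>> crop = PositiveCrop(positive_sampling_rate=0.8, size=[96, 96])
# >>> img = np.random.rand(240, 320, 1)
# >>> label = np.random.randint(0, 2, (240, 320, 1))
# >>> img_c, label_c = crop(img, label, target=label, target_label=1)   # both (96, 96, 1)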
class RandomElasticDeformation(BaseTransform):
"""Do the random elastic deformation as used in U-Net and V-Net by using the bspline transform.
Args:
do_z_deformation (bool, optional): Whether to apply the deformation along the z dimension (default: False).
num_ctrl_points (int, optional): The number of the control points to form the control point grid (default: 4).
sigma (int or float, optional): The number to determine the extent of deformation (default: 15).
prob (float, optional): The probability of applying the deformation (default: 0.5).
"""
def __init__(self, do_z_deformation=False, num_ctrl_points=4, sigma=15, prob=0.5):
self.do_z_deformation = do_z_deformation
self.num_ctrl_points = max(num_ctrl_points, 2)
self.sigma = max(sigma, 1)
self.prob = max(0, min(prob, 1))
self.bspline_transform = None
def __call__(self, *imgs, elastic_deformation_orders=None, **kwargs):
"""
Args:
imgs (tuple of numpy.ndarray): The images to be deformed.
elastic_deformation_orders (sequence of int, optional): The corresponding interpolation order of the images (default: None, the interpolation order would be 3 for all the images).
Returns:
imgs (tuple of numpy.ndarray): The deformed images.
"""
if not all(isinstance(img, np.ndarray) for img in imgs):
raise TypeError('All of the images should be numpy.ndarray.')
if not all(img.ndim == 3 for img in imgs) and not all(img.ndim == 4 for img in imgs):
raise ValueError("All of the images' dimensions should be 3 (2D images) or 4 (3D images).")
if random.random() < self.prob:
self._init_bspline_transform(imgs[0].shape)
if elastic_deformation_orders:
imgs = tuple(self._apply_bspline_transform(img, order) for img, order in zip(imgs, elastic_deformation_orders))
else:
imgs = map(self._apply_bspline_transform, imgs)
return imgs
def _init_bspline_transform(self, shape):
"""Initialize the bspline transform.
Args:
shape (tuple): The size of the control point grid.
"""
# Remove the channel dimension.
shape = shape[:-1]
# Initialize the control point grid.
img = sitk.GetImageFromArray(np.zeros(shape))
mesh_size = [self.num_ctrl_points] * img.GetDimension()
self.bspline_transform = sitk.BSplineTransformInitializer(img, mesh_size)
# Set the parameters of the bspline transform randomly.
params = self.bspline_transform.GetParameters()
params = np.asarray(params, dtype=np.float64)
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2019 by <NAME> <<EMAIL>>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Classes for ADMM algorithms for convolutional sparse coding with
Total Variation regularisation terms"""
from __future__ import division, print_function
from builtins import range
import copy
import numpy as np
from sporco.admm import admm
import sporco.cnvrep as cr
from sporco.admm import cbpdn
import sporco.linalg as sl
import sporco.prox as sp
from sporco.util import u
__author__ = """<NAME> <<EMAIL>>"""
class ConvBPDNScalarTV(admm.ADMM):
r"""
ADMM algorithm for an extension of Convolutional BPDN including
terms penalising the total variation of each coefficient map
:cite:`wohlberg-2017-convolutional`.
|
.. inheritance-diagram:: ConvBPDNScalarTV
:parts: 2
|
Solve the optimisation problem
.. math::
\mathrm{argmin}_\mathbf{x} \; \frac{1}{2}
\left\| \sum_m \mathbf{d}_m * \mathbf{x}_m - \mathbf{s}
\right\|_2^2 + \lambda \sum_m \| \mathbf{x}_m \|_1 +
\mu \sum_m \left\| \sqrt{\sum_i (G_i \mathbf{x}_m)^2} \right\|_1
\;\;,
where :math:`G_i` is an operator computing the derivative along index
:math:`i`, via the ADMM problem
.. math::
\mathrm{argmin}_\mathbf{x} \; (1/2) \left\| D \mathbf{x} -
\mathbf{s} \right\|_2^2 + \lambda
\| \mathbf{y}_L \|_1 + \mu \sum_m \left\| \sqrt{\sum_{i=0}^{L-1}
\mathbf{y}_i^2} \right\|_1 \quad \text{ such that } \quad
\left( \begin{array}{c} \Gamma_0 \\ \Gamma_1 \\ \vdots \\ I
\end{array} \right) \mathbf{x} =
\left( \begin{array}{c} \mathbf{y}_0 \\
\mathbf{y}_1 \\ \vdots \\ \mathbf{y}_L \end{array}
\right) \;\;,
where
.. math::
D = \left( \begin{array}{ccc} D_0 & D_1 & \ldots \end{array} \right)
\qquad
\mathbf{x} = \left( \begin{array}{c} \mathbf{x}_0 \\ \mathbf{x}_1 \\
\vdots \end{array} \right) \qquad
\Gamma_i = \left( \begin{array}{ccc}
G_i & 0 & \ldots \\ 0 & G_i & \ldots \\ \vdots & \vdots & \ddots
\end{array} \right) \;\;.
For multi-channel signals with a single-channel dictionary, scalar TV is
applied independently to each coefficient map for channel :math:`c` and
filter :math:`m`. Since multi-channel signals with a multi-channel
dictionary also have one coefficient map per filter, the behaviour is
the same as for single-channel signals.
After termination of the :meth:`solve` method, attribute :attr:`itstat`
is a list of tuples representing statistics of each iteration. The
fields of the named tuple ``IterationStats`` are:
``Iter`` : Iteration number
``ObjFun`` : Objective function value
``DFid`` : Value of data fidelity term :math:`(1/2) \| \sum_m
\mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \|_2^2`
``RegL1`` : Value of regularisation term :math:`\sum_m \|
\mathbf{x}_m \|_1`
``RegTV`` : Value of regularisation term :math:`\sum_m \left\|
\sqrt{\sum_i (G_i \mathbf{x}_m)^2} \right\|_1`
``PrimalRsdl`` : Norm of primal residual
``DualRsdl`` : Norm of dual residual
``EpsPrimal`` : Primal residual stopping tolerance
:math:`\epsilon_{\mathrm{pri}}`
``EpsDual`` : Dual residual stopping tolerance
:math:`\epsilon_{\mathrm{dua}}`
``Rho`` : Penalty parameter
``XSlvRelRes`` : Relative residual of X step solver
``Time`` : Cumulative run time
"""
class Options(cbpdn.ConvBPDN.Options):
r"""ConvBPDNScalarTV algorithm options
Options include all of those defined in
:class:`.admm.cbpdn.ConvBPDN.Options`, together with additional
options:
``TVWeight`` : An array of weights :math:`w_m` for the term
penalising the gradient of the coefficient maps. If this
option is defined, the regularization term is :math:`\sum_m w_m
\left\| \sqrt{\sum_i (G_i \mathbf{x}_m)^2} \right\|_1`
where :math:`w_m` is the weight for filter index :math:`m`. The
array should be an :math:`M`-vector where :math:`M` is the number
of filters in the dictionary.
"""
defaults = copy.deepcopy(cbpdn.ConvBPDN.Options.defaults)
defaults.update({'TVWeight' : 1.0})
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
ConvBPDNScalarTV algorithm options
"""
if opt is None:
opt = {}
cbpdn.ConvBPDN.Options.__init__(self, opt)
itstat_fields_objfn = ('ObjFun', 'DFid', 'RegL1', 'RegTV')
itstat_fields_extra = ('XSlvRelRes',)
hdrtxt_objfn = ('Fnc', 'DFid', u('Regℓ1'), u('RegTV'))
hdrval_objfun = {'Fnc': 'ObjFun', 'DFid': 'DFid',
u('Regℓ1'): 'RegL1', u('RegTV'): 'RegTV'}
def __init__(self, D, S, lmbda, mu=0.0, opt=None, dimK=None, dimN=2):
"""
|
**Call graph**
.. image:: ../_static/jonga/cbpdnstv_init.svg
:width: 20%
:target: ../_static/jonga/cbpdnstv_init.svg
|
Parameters
----------
D : array_like
Dictionary matrix
S : array_like
Signal vector or matrix
lmbda : float
Regularisation parameter (l1)
mu : float
Regularisation parameter (gradient)
opt : :class:`ConvBPDNScalarTV.Options` object
Algorithm options
dimK : 0, 1, or None, optional (default None)
Number of dimensions in input signal corresponding to multiple
independent signals
dimN : int, optional (default 2)
Number of spatial dimensions
"""
if opt is None:
opt = ConvBPDNScalarTV.Options()
# Infer problem dimensions and set relevant attributes of self
self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)
# Call parent class __init__
Nx = np.product(np.array(self.cri.shpX))
yshape = self.cri.shpX + (len(self.cri.axisN)+1,)
super(ConvBPDNScalarTV, self).__init__(Nx, yshape, yshape,
S.dtype, opt)
# Set l1 term scaling and weight array
self.lmbda = self.dtype.type(lmbda)
self.Wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)
self.Wl1 = self.Wl1.reshape(cr.l1Wshape(self.Wl1, self.cri))
self.mu = self.dtype.type(mu)
if hasattr(opt['TVWeight'], 'ndim') and opt['TVWeight'].ndim > 0:
self.Wtv = np.asarray(opt['TVWeight'].reshape(
(1,)*(dimN + 2) + opt['TVWeight'].shape), dtype=self.dtype)
else:
# Wtv is a scalar: no need to change shape
self.Wtv = np.asarray(opt['TVWeight'], dtype=self.dtype)
# Set penalty parameter
self.set_attr('rho', opt['rho'], dval=(50.0*self.lmbda + 1.0),
dtype=self.dtype)
# Set rho_xi attribute
self.set_attr('rho_xi', opt['AutoRho', 'RsdlTarget'], dval=1.0,
dtype=self.dtype)
# Reshape D and S to standard layout
self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)
self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)
# Compute signal in DFT domain
self.Sf = sl.rfftn(self.S, None, self.cri.axisN)
self.Gf, GHGf = sl.gradient_filters(self.cri.dimN+3, self.cri.axisN,
self.cri.Nv, dtype=self.dtype)
self.GHGf = self.Wtv**2 * GHGf
# Initialise byte-aligned arrays for pyfftw
self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
self.Xf = sl.pyfftw_rfftn_empty_aligned(self.cri.shpX, self.cri.axisN,
self.dtype)
self.setdict()
def setdict(self, D=None):
"""Set dictionary array."""
if D is not None:
self.D = np.asarray(D, dtype=self.dtype)
self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)
# Compute D^H S
self.DSf = np.conj(self.Df) * self.Sf
if self.cri.Cd > 1:
self.DSf = np.sum(self.DSf, axis=self.cri.axisC, keepdims=True)
if self.opt['HighMemSolve'] and self.cri.Cd == 1:
self.c = sl.solvedbi_sm_c(
self.Df, np.conj(self.Df), self.rho*self.GHGf + self.rho,
self.cri.axisM)
else:
self.c = None
def rhochange(self):
"""Updated cached c array when rho changes."""
if self.opt['HighMemSolve'] and self.cri.Cd == 1:
self.c = sl.solvedbi_sm_c(
self.Df, np.conj(self.Df), self.rho*self.GHGf + self.rho,
self.cri.axisM)
def xstep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`."""
self.YU[:] = self.Y - self.U
YUf = sl.rfftn(self.YU, None, self.cri.axisN)
# The sum is over the extra axis indexing spatial gradient
# operators G_i, *not* over axisM
b = self.DSf + self.rho*(YUf[..., -1] + self.Wtv * np.sum(
np.conj(self.Gf) * YUf[..., 0:-1], axis=-1))
import numpy as np
from astropy.io import ascii
import athena_read as ar
def read_trackfile(fn,m1=0,m2=0):
orb=ascii.read(fn)
print( "reading orbit file for planet wind simulation...")
if m1==0:
m1 = orb['m1']
if m2==0:
m2 = orb['m2']
orb['sep'] = np.sqrt(orb['x']**2 + orb['y']**2 + orb['z']**2)
orb['r'] = np.array([orb['x'],orb['y'],orb['z']]).T
orb['rhat'] = np.array([orb['x']/orb['sep'],orb['y']/orb['sep'],orb['z']/orb['sep']]).T
orb['v'] = np.array([orb['vx'],orb['vy'],orb['vz']]).T
orb['vmag'] = np.linalg.norm(orb['v'],axis=1)
orb['vhat'] = np.array([orb['vx']/orb['vmag'],orb['vy']/orb['vmag'],orb['vz']/orb['vmag']]).T
orb['xcom'] = m2*orb['x']/(m1+m2)
orb['ycom'] = m2*orb['y']/(m1+m2)
orb['zcom'] = m2*orb['z']/(m1+m2)
orb['vxcom'] = m2*orb['vx']/(m1+m2)
orb['vycom'] = m2*orb['vy']/(m1+m2)
orb['vzcom'] = m2*orb['vz']/(m1+m2)
orb['rcom'] = np.array([orb['xcom'],orb['ycom'],orb['zcom']]).T
"""Python library for GCCR002"""
from contextlib import contextmanager
from datetime import datetime
import hashlib
from io import StringIO
from IPython.display import display as _display
from itertools import chain, product, combinations_with_replacement
import joblib
import json
import logging
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import networkx as nx
import numpy as np
import pandas as pd
import pathlib
import pickle
import pingouin
import re
from scipy.special import logit
from scipy.stats import ks_2samp, mannwhitneyu, wilcoxon, gaussian_kde, chi2_contingency, entropy, norm
import seaborn as sns
from sklearn.decomposition import PCA, NMF
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFE, RFECV
from sklearn.linear_model import LinearRegression, RidgeClassifier, RidgeClassifierCV, LogisticRegression, LogisticRegressionCV
from sklearn.metrics import auc, roc_curve, roc_auc_score, plot_roc_curve, confusion_matrix
from sklearn.metrics import precision_score, recall_score, get_scorer, make_scorer, SCORERS
from sklearn.model_selection import ShuffleSplit, GroupShuffleSplit, LeaveOneOut, cross_validate, cross_val_score, cross_val_predict
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.utils.class_weight import compute_sample_weight
from statsmodels.api import add_constant
from statsmodels.discrete.discrete_model import Logit
import sys
sys.path.append('/home/rgerkin/dev/pyvenn') #TODO: Turn pyvenn into a pip-installable package
from tqdm.auto import tqdm, trange
import urllib
from venn import venn3, venn4, venn5, get_labels
import warnings
import zipfile
sns.set(font_scale=1.1)
sns.set_style('whitegrid')
logger = logging.Logger('GCCR002')
known_md5s = {'GCCR002_complete_database.csv': 'd476f67b081dd9d8d8cf1ee0481ad4e8',
'GCCR002_DATA_COVID_TimeStamp.xlsx': 'aa016d9208fbb44ffd8ce4a2dfe908a4',
'GCCR002_DATA_COVID_TimeStamp_plusdataJuly.csv': '56922f025047e379bf5cfc8ff2ceed91'}
DATA = pathlib.Path('data')
YOUGOV_CUTOFF_DATE = '2020-07-03'
# In order to guarantee a match to the published figures, we must remove YouGov reported after the manuscript submission date.
# This corresponds to week 11. To update this figure with new data (collected by YouGov after manuscript submission),
# change max_week to a higher value (e.g. the present day)"""
# For each type (e.g. categorical), a list of regular expressions for features considered to be that type
dtype_ontology = {'categorical': ['Gender', 'GCCR', 'Referred', 'Test_Name'],
'discrete': ['Age', 'Days_since_onset', 'Onset_day', 'Completion_day', 'Recovery'],
'binary': ['Changes', 'Symptoms', 'Prior_conditions', 'cigarette(!=_f)', 'cigarette_use', 'Resp'],
'continuous': ['(?<!did_)(before_)', 'during_', 'after_', 'change_', 'recovery_', 'frequency', 'cigarette(?!_use)'],
}
feature_ontology = {'incidental': ['GCCR', 'Test_Name', 'Completion_', 'Referred'],
'chemosensory': ['Changes_in', 'Taste', 'Smell', 'Cheme', '_food', '_smell'],
'demographic': ['Gender', 'Age', 'Country'],
'history': ['Prior_conditions', 'cigarette'],
'typical': ['Symptoms', 'Resp', 'Recovery', 'Blocked', 'Onset_', 'Days_']
}
timing_ontology = {'incidental': ['GCCR', 'Test_Name', 'Day', '_day', 'Referred'],
'demographic': ['Gender', 'Age', 'Country'],
'before': ['Prior_conditions', 'before_illness', 'cigarette'],
'during': ['Changes_in', 'change_illness', 'during_illness', 'Resp', 'Symptoms'],
'after': ['Recovery', 'after_illness', 'recovery_illness']}
# Color scheme
colors = pd.Series(
index=pd.MultiIndex.from_tuples([], names=["diagnosis", "sense"]), dtype="object"
)
colors.loc["C19+", "Smell"] = "#6699CD"
colors.loc["C19-", "Smell"] = "#a5bcd4"
colors.loc["C19+", "Taste"] = "#ff9900"
colors.loc["C19-", "Taste"] = "#ffce85"
colors.loc["C19+", "Chemesthesis"] = "#009999"
colors.loc["C19-", "Chemesthesis"] = "#5fc7c7"
colors.loc["C19+", "Blocked_nose"] = "#996600"
colors.loc["C19-", "Blocked_nose"] = "#d1a752"
@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
"""
A context manager that will prevent any logging messages
triggered during the body from being processed.
:param highest_level: the maximum logging level in use.
This would only need to be changed if a custom level greater than CRITICAL
is defined.
"""
# two kind-of hacks here:
# * can't get the highest logging level in effect => delegate to the user
# * can't get the current module-level override => use an undocumented
# (but non-private!) interface
previous_level = logging.root.manager.disable
logging.disable(highest_level)
try:
yield
finally:
logging.disable(previous_level)
def get_hash(x):
return joblib.hash(x)
def load_all():
# All of the content loaded here was produced in pre-analysis.ipynb
with open(DATA / 'processed' / 'data-types.json') as f:
dtypes = json.load(f)
df = pd.read_csv(DATA / 'processed' / 'data-clean.csv', dtype=dtypes, index_col=0)
Xu = pd.read_csv(DATA / 'processed' / 'X-raw.csv', index_col=0).astype('float')
Xn = pd.read_csv(DATA / 'processed' / 'X-normalized.csv', index_col=0).astype('float')
#Xu.index = Xu.index.astype(int)
#Xn.index = Xu.index.astype(int)
with open(DATA / 'processed' / 'targets.json') as f:
targets = json.load(f)
sets = {name: set(ids) for name, ids in targets.items()}
with open(DATA / 'processed' / 'classes.json') as f:
classes = json.load(f)
return df, Xu, Xn, dtypes, sets, classes
def load_raw():
#file_name = 'GCCR002_DATA_COVID_TimeStamp.xlsx'
#file_name = 'GCCR002_DATA_COVID_TimeStamp_plusdataJuly.csv'
#assert_md5(file_name) # Check that the MD5 hash of the file is as expected
#if file_name.endswith('.xlsx'):
# df = pd.read_excel(file_name) # Pandas takes forever to load Excel files
#elif file_name.endswith('.csv'):
# df = pd.read_csv(file_name)
df_ORIGINAL = pd.read_csv(DATA / 'raw' / 'GCCR002_DATA_COVID_TimeStamp.csv')
df_JULY = pd.read_csv(DATA / 'raw' / 'GCCR002_julydatabase_timestamp_Countryclean_labelscorrect.csv')
to_drop = ['UniqueID.1', 'UniqueID_1', 'Unnamed: 0', 'Unnamed: 2', 'Country_clean']
for df_ in [df_ORIGINAL, df_JULY]:
df_.drop(to_drop, axis=1, errors='ignore', inplace=True)
df_['Date_of_onset'] = pd.to_datetime(df_['Date_of_onset'])
df_['Year_of_birth_Time_Stamp'] = pd.to_datetime(df_['Year_of_birth_Time_Stamp'])
assert not set(df_ORIGINAL['UniqueID']).intersection(set(df_JULY['UniqueID']))
df = pd.concat([df_ORIGINAL, df_JULY[df_ORIGINAL.columns]])
df = df.rename(columns={'Chemethesis_before_illness': 'Chemesthesis_before_illness'})
assert len(set(df['UniqueID'])) == df.shape[0]
df = df.set_index('UniqueID')
df = df.drop('UniqueID.1', errors='ignore')
report_size(df, 'loading')
return df
def get_md5(file_name):
"""Get MD5 hash of file"""
with open(file_name, 'rb') as f:
# read contents of the file
data = f.read()
# pipe contents of the file through
md5 = hashlib.md5(data).hexdigest()
return md5
def assert_md5(file_name):
md5 = get_md5(file_name)
assert md5 == known_md5s[file_name], "MD5 hashes do not match; file may have been changed."
def date_to_integer_day(series):
series = series.dt.dayofyear
series = series.fillna(-1).astype(int)
return series
def display(x):
if isinstance(x, str):
print(x)
else:
_display(x)
def interp_index(array1, array2, threshold):
i = np.searchsorted(array1, threshold)
a1 = array1[i-1]
b1 = array1[i]
a2 = array2[i-1]
b2 = array2[i]
return a2 + (b2-a2)*(threshold-a1)/(b1-a1)
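# Worked example (illustrative, not part of the original module): `interp_index`
# linearly interpolates array2 at the point where array1 crosses `threshold`;
# `plot_roc` below uses it to read values off the ROC arrays (e.g. TPR at FPR = 0.5).
#
# >>> fpr = np.array([0.0, 0.4, 0.6, 1.0])
# >>> tpr = np.array([0.0, 0.7, 0.9, 1.0])
# >>> interp_index(fpr, tpr, 0.5)   # 0.7 + (0.9 - 0.7)*(0.5 - 0.4)/(0.6 - 0.4) = 0.8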
def plot_roc(clf, X, y, cv, cv_kwargs=None, weights=None, concat=True, ax=None, name=None, title=None):
# Plot ROC curve
roc_aucs = []
n = cv.get_n_splits()
cv_kwargs = {} if cv_kwargs is None else cv_kwargs
if ax is None:
plt.figure(figsize=(4,4))
ax = plt.gca()
y_score = []
y_true = []
all_weights = []
sample_weight_ = get_weights(X, y, weights)
for i, (train, test) in enumerate(cv.split(X, **cv_kwargs)):
#sample_weight = get_weights(X.iloc[train], y.iloc[train], weights)
sample_weight = sample_weight_.iloc[train]
clf.fit(X.iloc[train, :], y.iloc[train], sample_weight=sample_weight)
#sample_weight = get_weights(X.iloc[test], y.iloc[test], weights)
sample_weight = sample_weight_.iloc[test]
if hasattr(clf, 'predict_proba'):
y_score_ = clf.predict_proba(X.iloc[test, :])[:, 1]
else:
y_score_ = clf.decision_function(X.iloc[test, :])
if not concat:
curve = plot_roc_curve(clf, X.iloc[test, :], y.iloc[test],
alpha=(1/np.sqrt(n)), ax=ax,
sample_weight=sample_weight, name='Split %d' % i)
roc_aucs.append(curve.roc_auc)
else:
auc = roc_auc_score(y.iloc[test], y_score_)
roc_aucs.append(auc)
y_score += list(y_score_)
y_true += list(y.iloc[test])
all_weights += list(sample_weight)
score = np.mean(roc_aucs)
if concat:
fpr, tpr, thresholds = roc_curve(y_true, y_score, sample_weight=all_weights)
#score = roc_auc_score(y_true, y_score, sample_weight=all_weights)
if not name:
name = clf.__class__.__name__.replace('Classifier','').replace('Ridge', 'Linear')
sens_half = interp_index(fpr, tpr, 0.5)
spec_half = 1-interp_index(tpr, fpr, 0.5)
print("%s: Sens50 = %.3g, Spec50 = %.3g" % (name, sens_half, spec_half))
label = '%s: %.3g' % (name, score) if name else '%.3g' % score
ax.plot(fpr, tpr, label=label)
else:
ax.set_title('AUC = %.3f +/- %.3f' % (score, np.std(roc_aucs)/np.sqrt(n)))
ax.plot([0, 1], [0, 1], 'k--')
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
if title:
ax.set_title(title)
if n <= 10 or concat:
ax.legend(fontsize=12, loc=4)
return score
def rank_features(clf, X):
# Rank the features identified by the classifier from most to least important
key_features = pd.Series(clf.feature_importances_, index=X.columns).sort_values(ascending=False)
# Show the 20 most important
key_features.index = nicify(list(key_features.index))
return key_features.to_frame(name='Importance')
def rank_coefs(clf, X, nicify_=True):
key_features = pd.Series(clf.coef_.ravel(), index=X.columns)
if hasattr(clf, 'intercept_') and clf.intercept_:
key_features['Intercept'] = clf.intercept_[0]
kf = key_features.to_frame(name='Value')
kf['Magnitude'] = kf['Value'].abs().round(3)
kf['Sign'] = ['+' if x>=0 else '-' for x in kf['Value']]
kf = kf.sort_values('Magnitude', ascending=False)
kf = kf.drop('Value', axis=1)
kf = kf[kf['Magnitude']>0]
if nicify_:
kf.index = nicify(list(kf.index))
return kf
def compute_score(clf, X, y, cv):
# Apply cross-validation using this splitter, and check the following fitness metrics
results = cross_validate(clf, X, y, scoring=['roc_auc'], cv=cv)
for key in results:
print(key, results[key].mean())
def cardinality_filter(X, n, dtype=None):
cols = []
for col in X:
if dtype is None or X[col].dtype == dtype:
u = X[col].unique()
if len(u)>=n:
cols.append(col)
return cols
def ontology_to_classes(df, ontology, invert=False, add=None):
if add is None:
add = []
unassigned_cols = list(df.drop('id', errors='ignore'))
if invert:
classes = {x:[] for x in ontology}
else:
classes = {}
for key, patterns in ontology.items():
for pattern in patterns:
r = re.compile(pattern)
cols = list(filter(r.search, list(df)))
for col in cols:
if col in unassigned_cols:
if invert:
classes[key].append(col)
else:
classes[col] = key
unassigned_cols.remove(col)
assert len(unassigned_cols)==0, "%s were unassigned." % unassigned_cols
for kind in add:
# The above ontology maps each feature to a single class.
# Additiomal feature_classes below can reuse these features.
if kind == 'CDC9':
classes[kind] = ['Symptoms_%s' % x for x in
['changes_in_smell', 'changes_in_food_flavor', 'fever', 'muscle_aches',
'runny_nose', 'dry_cough', 'diarrhea', 'fatigue', 'difficulty_breathing_/_shortness_of_breath']]
if kind == 'CDC7':
classes[kind] = ['Symptoms_%s' % x for x in
['fever', 'muscle_aches',
'runny_nose', 'dry_cough', 'diarrhea', 'fatigue', 'difficulty_breathing_/_shortness_of_breath']]
if kind == 'CDC3':
classes[kind] = ['Symptoms_%s' % x for x in
['fever', 'dry_cough', 'difficulty_breathing_/_shortness_of_breath']]
elif kind == 'chemosensory-binary':
classes[kind] = [x for x in classes['chemosensory'] if 'illness' not in x]
return classes
def get_rccv_score(clf, X, y, feature_classes, classes, weights='balanced'):
sample_weight = get_weights(X, y, weights)
features = list(chain(*[feature_classes[x] for x in classes]))
clf.fit(X[features], y, sample_weight=sample_weight)
return clf.best_score_.round(3)
def roc(clf, X, y, feature_classes, classes, cv, weights=None, concat=True, ax=None, with_name=True, title=False):
features = list(chain(*[feature_classes[x] for x in classes]))
if with_name:
name = '%s' % '+'.join(classes)
score = plot_roc(clf, X[features], y, cv, weights=weights, concat=concat, ax=ax, name=name)
if ax and title:
if title is True:
title = '%s' % '+'.join(classes)
ax.set_title(title)
return score
def country_weights(X, y):
test_names = [col for col in X if 'Test_' in col]
sample_weight = y.copy()
sample_weight[:] = 1
for test_name in test_names:
m = X[test_name].mean() # Allows this to work even on standardized data
index = X[X[test_name]>m].index
if len(index):
weight = compute_sample_weight('balanced', y.loc[index])
sample_weight.loc[index] = weight
return sample_weight
def feature_weights(X, y, feature):
sample_weight = y.copy()
sample_weight[:] = 1
m = X[feature].mean() # Allows this to work even on standardized data
index = X[X[feature]>m].index
if len(index):
weight = compute_sample_weight('balanced', y.loc[index])
sample_weight.loc[index] = weight
return sample_weight
def get_weights(X, y, kind):
if isinstance(kind, pd.Series):
sample_weight = kind
elif kind == 'balanced-by-country':
sample_weight = country_weights(X, y)
elif kind == 'balanced':
sample_weight = compute_sample_weight('balanced', y)
elif kind:
sample_weight = compute_sample_weight('balanced', X[kind])
else:
sample_weight = compute_sample_weight(None, y)
sample_weight = pd.Series(sample_weight, index=X.index)
return sample_weight
def table_summarize(X, y, feature):
y.name = 'COVID status'
summary = X.join(y).groupby([feature, 'COVID status']).count().sum(axis=1).to_frame().unstack('COVID status')[0]
return summary.div(summary.sum()).round(2)
def hist_summarize(X, y, feature):
plt.hist(X.loc[y==1, feature], color='r', bins=30, alpha=0.3, density=True, label='COVID+');
plt.hist(X.loc[y==0, feature], color='g', bins=30, alpha=0.3, density=True, label='COVID-');
plt.legend()
def report_size(df, action):
print("Data after %s has %d subjects and %d features" % (action, *df.shape))
def qq_plot(X, y, feature):
x_minus = X[y==0][feature].quantile(np.linspace(0, 1, 101))
x_plus = X[y==1][feature].quantile(np.linspace(0, 1, 101))
ax = sns.lineplot(x_minus, x_plus)
ax.set_xlabel('%s (COVID -)' % feature.replace('_',' '))
ax.set_ylabel('%s (COVID +)' % feature.replace('_',' '))
ax.plot([0, max(x_minus)], [0, max(x_plus)], '--')
def pp_plot(X, y, feature, label=True, stabilized=False, ax=None):
x_minus = X[y==0][feature]
x_plus = X[y==1][feature]
minn = min(x_minus.min(), x_plus.min())
maxx = max(x_minus.max(), x_plus.max())
s_minus = pd.Series(index=np.linspace(minn-0.001, maxx+0.001, 200), dtype=float)
s_plus = pd.Series(index=np.linspace(minn-0.001, maxx+0.001, 200), dtype=float)
s_minus[:] = s_minus.index.map(lambda x: (x_minus<=x).mean())
s_plus[:] = s_plus.index.map(lambda x: (x_plus<=x).mean())
if stabilized:
s_minus = (2/np.pi)*np.arcsin(np.sqrt(s_minus))
s_plus = (2/np.pi)*np.arcsin(np.sqrt(s_plus))
D, p = ks_2samp(x_minus, x_plus)
#S, p = mannwhitneyu(x_minus, x_plus)
sign = (s_plus - s_minus).mean() > 0
#print(sign)
ax = sns.lineplot(s_minus, s_plus, ax=ax,
label='%s (D=%.2f)' % (feature.replace('_', ' ').title(), D if sign>0 else -D))
ax.set_xlabel('COVID -')
ax.set_ylabel('COVID +')
ax.plot([0, 1], [0, 1], 'k--')
ax.legend(fontsize=11)
def nicify(name, line_break=False):
if isinstance(name, list):
return list(map(nicify, name))
s = name.replace('_', ' ').title().replace('Symptoms ', '').split('/')[0].strip().replace('Gccr', 'GCCR v')\
.replace('Illness Y', 'Illness').replace('Before Illness', 'Before').replace('After Illness', 'After')\
.replace('Prior Conditions None', 'No Prior Conditions')\
.replace('Basic Tastes ', '').replace('Recovery Y', 'Recovered').replace('Prior Conditions ', '').split('(')[0]\
.replace('Changes In Smell I Cannot Smell At All', 'Anosmia/Hyposmia')\
.replace('Changes In Smell Smells Smell Different Than They Did Before', 'Parosmia')\
.replace("Changes In Smell I Can Smell Things That Aren'T There", 'Phantosmia')\
.replace('Changes In Smell Sense Of Smell Fluctuates', 'Smell Fluctuation')\
.replace('During Illness', 'During').replace(' Illness', '')\
.replace(' That Required Chemotherapy Or Radiation', '+Chemo/Radiation')\
.replace('Combustible Cigarette', 'Cigarette')\
.replace('E-Cigarette 30 Day', 'E-Cigarette')\
.replace(' That Did Not Require Chemotherapy Or Radiation', '-Chemo/Radiation')\
.replace('Results','').replace('Final','').replace('!','').replace('Version','').split('[')[0]\
.replace('Const', 'Intercept')\
.replace(' ', ' ').strip()
if line_break:
x = s.rfind(' ')
s = s[:x] + '\n' + s[x+1:]
return s
def nicify_labels(ax, x=True, y=True, line_break=True):
for xy in ['x', 'y']:
if locals()[xy]:
# Fix axis labels
z = getattr(ax, 'get_%slabel' % xy)()
new = nicify(z, line_break=line_break)
getattr(ax, 'set_%slabel' % xy)(new)
# Fix tick labels
z = getattr(ax, 'get_%sticklabels' % xy)()
new = [nicify(zi.get_text(), line_break=line_break)
if not zi.get_text().isnumeric() else zi.get_text()
for zi in z]
getattr(ax, 'set_%sticklabels' % xy)(new)
def fill_impute(df, feature_dtypes, copy=True):
if copy:
df = df.copy()
# Apply the following missing data handling and recasting rules.
for col, dtype in feature_dtypes.items():
if dtype == 'categorical':
df[col] = df[col].fillna('Missing').astype('object')
elif dtype == 'discrete':
df[col] = df[col].fillna(df[col].median()).astype(int)
elif dtype == 'binary':
df[col] = df[col].fillna(0.5).astype('float')
elif dtype == 'continuous':
df[col] = df[col].fillna(df[col].median()).astype('float')
return df
def plot_violin(X, y, feature, ax):
y.name = "COVID status"
Xy = X.join(y)
sns.violinplot(x="COVID status", y=feature, data=Xy, ax=ax, alpha=0.2)
ax.set_xlabel('')
ax.set_xticklabels(['COVID -', 'COVID +'], fontweight='bold')
ax.set_ylabel(nicify(feature), fontweight='bold')
def rescale(X):
# Create a version of X for which every column has mean 0, variance 1.
X_st = X.copy()
std_sclr = StandardScaler()
X_st[:] = std_sclr.fit_transform(X)
assert np.allclose(X_st.mean(), 0)
assert all(np.isclose(X_st.var(ddof=0), 1) + np.isclose(X_st.var(ddof=0), 0))
# Create a version of X for which every column has min 0, max 1.
mm_sclr = MinMaxScaler()
X_nm = X.copy()
X_nm[:] = mm_sclr.fit_transform(X)
return X_st, std_sclr, X_nm, mm_sclr
def lrcv_check(lrcv, X, y, features):
sample_weight = get_weights(X, y, 'balanced-by-country')
lrcv.fit(X[features], y, sample_weight=sample_weight)
return pd.DataFrame(lrcv.scores_[True].mean(axis=0).round(3),
index=pd.Series(lrcv.Cs_, name='C'),
columns=pd.Series(lrcv.l1_ratios_, name='L1 Ratio'))
def rccv_check(rccv, X, y, features):
sample_weight = get_weights(X, y, 'balanced-by-country')
rccv.fit(X[features], y, sample_weight=sample_weight)
return rccv.best_score_.round(3), rccv.alpha_
def raw_hist(X, y, feature, cumul=False):
minn = X[feature].min()
maxx = X[feature].max()
diff = maxx - minn
bins = np.linspace(minn-diff*0.01, maxx+diff*0.01, 30)
X.loc[y==1, feature].hist(density=True, cumulative=cumul, bins=bins, alpha=0.3, label='+')
X.loc[y==0, feature].hist(density=True, cumulative=cumul, bins=bins, alpha=0.3, label='-')
plt.legend()
plt.title(nicify(feature))
def contingency(X, features, verbose=True):
z = pd.crosstab(*[X[f] for f in features])
z.index.name = nicify(z.index.name)
z.columns.name = nicify(z.columns.name)
n = z.sum().sum()
chi2, p, _, _ = chi2_contingency(z)
k = min(*z.shape)
if n and k>1:
v = np.sqrt(chi2/(n*(k-1)))
else:
v = None
if min(z.shape) >= 2 and z.iloc[0, 1] and z.iloc[1, 1]:
num = (z.iloc[0, 0] / z.iloc[0, 1])
denom = (z.iloc[1, 0] / z.iloc[1, 1])
oddsr = num / denom
else:
oddsr = None
if verbose:
print('p = %.2g' % p)
return z, p, chi2, v, oddsr
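# Worked example on a synthetic frame (illustrative, not part of the original module):
# for a 2x2 table the function returns the chi-square p-value, Cramer's V =
# sqrt(chi2 / (n*(k-1))) with k = min(rows, cols), and the odds ratio
# (z[0,0]/z[0,1]) / (z[1,0]/z[1,1]). The column names below are hypothetical.
#
# >>> demo = pd.DataFrame({'Symptoms_fever': [1]*60 + [0]*40 + [1]*30 + [0]*70,
# ...                      'COVID_diagnosis': [1]*100 + [0]*100})
# >>> table, p, chi2, v, oddsr = contingency(demo, ['Symptoms_fever', 'COVID_diagnosis'])
# >>> oddsr   # with this layout: (70/40) / (30/60) = 3.5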
def plot_coefs(clf, X, title=''):
x = rank_coefs(clf, X)
#x = x.drop('Intercept', errors='ignore')
threshold = x.drop('Intercept', errors='ignore')['Magnitude'].max()/10
x = x[x['Magnitude'] > threshold]
x = x.sort_values('Magnitude', ascending=True)
x['Pos'] = x.apply(lambda z: z['Magnitude'] if z['Sign']=='+' else None, axis=1)
x['Neg'] = x.apply(lambda z: z['Magnitude'] if z['Sign']=='-' else None, axis=1)*-1
try:
x['Pos'].plot(kind='barh', color='r', label='+')
except:
pass
try:
x['Neg'].plot(kind='barh', color='b', label='-')
except:
pass
plt.xlabel('Coefficient Magnitude')
plt.title(title)
plt.tight_layout()
def plot_pb_given_a_(X, b, a, restrict=None, ax=None, title=None, color='k', scale=1000, ticks=None):
if restrict is not None:
data = X.loc[restrict, [a, b]]
else:
data = X[[a, b]]
data = data.dropna()
kde = gaussian_kde(data.T)
if ticks is None:
ticks = np.linspace(-100, 100, 9)
a_ticks = [t for t in ticks if (t>=X[a].min() and t<=X[a].max())]
b_ticks = [t for t in ticks if (t>=X[b].min() and t<=X[b].max())]
a_support = a_ticks
b_support = np.linspace(b_ticks[0], b_ticks[-1], 100)
aa, bb = np.meshgrid(a_support, b_support)
pab = kde([aa.ravel(), bb.ravel()]).reshape(len(b_support), len(a_support))
pab = pd.DataFrame(pab, index=b_support, columns=a_support)
kde = gaussian_kde(data[a])
pa = pd.Series(kde(a_support), index=a_support)
pbga = pab.div(pa)
if ax is None:
ax = plt.gca()
for a_tick in a_ticks:
l2d = ax.plot(b_support, a_tick + scale*(pbga[a_tick]), label=a_tick, color=color)
color = l2d[0].get_color()
ax.plot(b_support, np.ones_like(b_support)*a_tick, '--', color=color)
ax.set_yticks(a_ticks)
ax.set_xticks(b_ticks)
ax.tick_params(reset=True, axis='y', length=5, width=1)
ax.set_xlim(b_ticks[0], b_ticks[-1])
ax.set_xlabel(nicify(b))
ax.set_ylabel(nicify(a))
if title:
ax.set_title(title)
#ax.legend()
return pab, pa
def plot_difference(pab_0, pa_0, pab_1, pa_1, ax=None, crange=(-1, 1), scale=10):
pbga_0 = pab_0.div(pa_0)
pbga_1 = pab_1.div(pa_1)
assert np.allclose(pbga_0.index, pbga_1.index)
assert np.allclose(pbga_0.columns, pbga_1.columns)
log2_odds = np.log2(pbga_1 / pbga_0)
from matplotlib.cm import get_cmap
from matplotlib.colors import Normalize
from matplotlib.colorbar import ColorbarBase
norm = Normalize(crange[0], crange[1], True)
cmap = get_cmap('RdBu_r')
for a_tick in log2_odds.columns:
color = cmap(norm(log2_odds[a_tick].values))
l2d = ax.scatter(log2_odds.index, a_tick + (scale*pa_0[a_tick]*2**log2_odds[a_tick]), label=a_tick, c=color, s=1)
#color = l2d[0].get_color()
ax.plot(log2_odds.index, np.ones_like(log2_odds.index)*a_tick, '--', color='k')
cb = plt.colorbar(l2d)
cb.outline.set_visible(False)
cb1 = ColorbarBase(cb.ax, cmap=cmap, norm=norm)
cticks = np.linspace(*crange, 5)
cb1.set_ticks(cticks)
cb1.set_ticklabels(['%.2g' % (2**x) for x in cticks])
cb1.set_label('Odds Ratio')
#cb.remove()
ax.set_title('Ratio')
def plot_conditionals(X, y, b, a, restrict=None, crange=(-2, 2), scale=10):
covid = {0: y[y==0].index,
1: y[y==1].index}
fig, ax = plt.subplots(1, 3, sharey=True, figsize=(15, 4))
if restrict is None:
restrict = y.index
restrict_0 = covid[0] & restrict
restrict_1 = covid[1] & restrict
pba_0, pa_0 = plot_pb_given_a_(X, b, a, restrict=restrict_0, ax=ax[0], title='COVID-', color='b')
pba_1, pa_1 = plot_pb_given_a_(X, b, a, restrict=restrict_1, ax=ax[1], title='COVID+', color='r')
ax[1].set_ylabel('')
ax[2].set_xlabel(ax[1].get_xlabel())
plot_difference(pba_0, pa_0, pba_1, pa_1, ax=ax[2], crange=crange, scale=scale)
plt.tight_layout()
return pba_0, pba_1
def get_matches(X, match_list):
return [x for x in X if any([m in x for m in match_list])]
def check_lr(X, y, cv, sample_weight=None):
from sklearn.linear_model import LogisticRegressionCV
lrcv = LogisticRegressionCV(penalty='elasticnet',
l1_ratios = np.linspace(0, 1, 5),
Cs = np.logspace(-3, 3, 7),
solver = 'saga',
scoring = 'roc_auc',
cv = cv,
max_iter=10000)
lrcv.fit(X, y, sample_weight=sample_weight)
return pd.DataFrame(lrcv.scores_[True].mean(axis=0),
index=lrcv.Cs_,
columns=lrcv.l1_ratios_)
def venn_covid(X, restrict, features, label, figsize=(5, 5)):
indices = {}
for feature in features:
z = X.loc[restrict, feature]
indices[feature] = set(z[z==1].index)
labels = get_labels([indices[feature] for feature in features], fill='percent')
labels = {k: v.replace('(','').replace(')','') for k, v in labels.items()}
venn3(labels, names=nicify(features), figsize=figsize, fontsize=9)
plt.gca().get_legend().remove()
z = X.loc[restrict, features]
z = z[z.sum(axis=1)==0].shape[0] / z.shape[0]
plt.title('%s; None of the three = %.1f%%' % (label, z*100))
def kde_plot(df, x, restrict, label, color, ax=None, title=None, **kwargs):
sns.set_style('whitegrid')
data = df.loc[restrict, x].dropna()
x_range = (np.min(df[x]), np.max(df[x]))
ax = sns.kdeplot(data, clip=x_range, color=color,
alpha=0.5, label=label, ax=ax, **kwargs)
ax.set_xlim(*x_range)
ax.set_xlabel(nicify(x), fontweight='bold')
ax.set_ylabel('Probability density', fontweight='bold')
if ax:
ax.set_title(nicify(x) if title is None else title)
return ax
def joint_plot(df, x, y, restrict, label, maxx=1e-3, cmap='Reds', cbar=False, ax=None):
sns.set_style('whitegrid')
data = df.loc[restrict, [x, y]].dropna()
x_range = (np.min(df[x]), np.max(df[x]))
y_range = (np.min(df[y]), np.max(df[y]))
ax = sns.kdeplot(data[x], data[y], shade=True, clip=[x_range, y_range],
vmin=0, vmax=maxx, cmap=cmap, shade_lowest=True, alpha=0.5,
ax=ax, n_levels=100, cbar=True,
cbar_kws={'format': '%.2g',
'label': 'Probability density (x1000)',
'shrink': 0.8})
cax = plt.gcf().axes[-1]
if cbar:
cbar_ticks = cax.get_yticks()
cax.set_yticklabels((cbar_ticks*1000).round(2))
else:
cax.remove()
ax.set_xlim(*x_range)
ax.set_ylim(*y_range)
ax.set_xlabel(nicify(x), fontweight='bold')
ax.set_ylabel(nicify(y), fontweight='bold')
ax.set_title(label, fontweight='bold')
return ax
def feature_hist(df, categories, feature, drop=None, bw=5, cut=0, ax=None, title=None, colors='rbgmck'):
for i, (label, indices) in enumerate(categories.items()):
ax = kde_plot(df, feature, indices, label, colors[i], lw=3, bw=bw, cut=cut, ax=ax, title=title)
ax.legend(fontsize=9);
def feature_contingency(df, categories, feature, drop=None, normalize=None, verbose=True):
z = df[[feature]].copy()
for label, indices in categories.items():
z.loc[indices, 'Group'] = label
if drop:
z = z[~z[feature].isin(drop)]
c = contingency(z, [feature, 'Group'], verbose=verbose)[0]
if normalize is not None:
c = c.div(c.sum(axis=normalize), axis=1-normalize).round(2)
try:
c.index = [x.replace('\n', ' ') for x in c.index]
except:
pass
try:
c.columns = [x.replace('\n', ' ') for x in c.columns]
except:
pass
c = c.rename(index={'M': 'Men', 'F': 'Women'})
return c
def feature_compare(df, categories, feature):
z = pd.DataFrame(index=list(categories),
columns=pd.MultiIndex.from_product([categories, ['Δ', 'σ', 'seΔ', 'D', 'p']]))
z.index.name = nicify(feature)
for d1 in categories:
for d2 in categories:
x1 = df.loc[categories[d1], feature]
x2 = df.loc[categories[d2], feature]
delta = x1.mean() - x2.mean()
d = cohen_d(x1, x2)
p = mannwhitneyu(x1, x2).pvalue
z.loc[d1, (d2, 'Δ')] = "%.2g" % delta
z.loc[d1, (d2, 'σ')] = "%.2g" % (0 if not d>0 else delta/d)
z.loc[d1, (d2, 'seΔ')] = "%.2g" % (delta/np.sqrt(len(x1)+len(x2)))
z.loc[d1, (d2, 'D')] = "%.2g" % d
z.loc[d1, (d2, 'p')] = "%.2g" % p
if len(categories)==2:
d1 = list(categories)[0]
d2 = list(categories)[1]
z = z.loc[d1, d2]
z.name = nicify(feature)
return z
def features_compare(df, categories, features):
assert len(categories) == 2
zs = [feature_compare(df, categories, feature) for feature in features]
return pd.concat(zs, axis=1)
def hist_or_contingency(df, categories, feature, drop=None, normalize=None):
if (df.dtypes[feature] != 'object') and (df[feature].max() > 5 or df[feature].min() < -5):
f = feature_hist
else:
f = feature_contingency
return f(df, categories, feature, drop=None, normalize=None)
def describe_three_clusters(df, feature, s, drop=None, normalize=None):
smell_loss = (df['Smell_change_illness']<-80)
smell_recovery = (df['Smell_recovery_illness']>30)
r = (df['Recovery_y/n']==2) & df.index.to_series().isin(s['covid'])
categories = {'Recovered Smell': r & smell_loss & smell_recovery,
'Nonrecovered Smell': r & smell_loss & ~smell_recovery,
'Intact Smell': r & ~smell_loss}
return hist_or_contingency(df, categories, feature, drop=drop, normalize=normalize)
def get_set(df, query):
return set(df.query(query, engine='python').index)
def diagnosis_joint_plots(df, feature1, feature2, r, s, maxx=3e-4):
if r is None:
r = set(df.index)
fig, ax = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(6, 11.5))
for i, (diagnosis, label, cmap) in enumerate([('lab-covid', 'C19+', 'Reds'),
('non-covid', 'C19-', 'Reds')]):
joint_plot(df, feature1, feature2,
r & s[diagnosis], label, cmap=cmap, maxx=maxx, ax=ax[i], cbar=(i==0))
return ax
def statsmodels_to_df(results, plot=False, title='', figsize=(10, 5), scale=None):
summ = results.summary()
df = pd.read_csv(StringIO(summ.tables[1].as_csv()), index_col=0)
df.columns = df.columns.str.strip()
df['abs_coef'] = df['coef'].abs()
df.index = df.index.str.strip()
df = df.sort_values('abs_coef', ascending=False)
df = df.round(2)
df['P>|z|'] = results.pvalues#.apply(lambda x: '%.1g'%x)
df = df[df['abs_coef']>0]
if scale is not None and scale is not False:
df['coef'] /= scale
try:
df['std err'] /= scale
df['[0.025'] /= scale
df['0.975]'] /= scale
except:
pass
df.index = nicify(list(df.index))
if plot:
plt.figure(figsize=figsize)
dfp = df.drop('Intercept')
ax = dfp.sort_values('abs_coef', ascending=True).plot.barh(y='coef', xerr='std err', legend=None, capsize=4, ax=plt.gca())
labels = ax.get_yticklabels()
ax.set_yticklabels('%s\n(p=%.1g)' % (label.get_text(), df['P>|z|'].iloc[-i-2])
for i, label in enumerate(labels))
ax.set_xlabel('Regression Coefficient', fontweight='bold')
ax.set_title(title, fontweight='bold')
df = df.drop('abs_coef', axis=1)
for col in df:
def fill(x):
try:
return '%.2g' % float(x)
except:
return None
df[col] = df[col].apply(fill)
return df
def pooled_sd(x1, x2):
n1 = len(x1)
n2 = len(x2)
s1 = np.std(x1)
s2 = np.std(x2)
num = (n1-1)*(s1**2) + (n2-1)*(s2**2)
denom = n1 + n2 - 2
return np.sqrt(num/denom)
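# The library calls `cohen_d` (e.g. in `feature_compare` above), but its definition is
# not shown in this excerpt. A minimal sketch consistent with `pooled_sd` is the
# standardized mean difference below; this is an assumption about the missing helper,
# not the original implementation.
def cohen_d(x1, x2):
    """Effect size: difference of means divided by the pooled standard deviation."""
    return (np.mean(x1) - np.mean(x2)) / pooled_sd(x1, x2)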
import numpy as np
from single_peaked_bandits.helpers import cumulative_reward
from single_peaked_bandits.solvers.base import BaseSolver
class DiscountedUCB(BaseSolver):
def __init__(self, xi=0.6, gamma=None):
super().__init__("discounted_ucb")
self.xi = xi
self.gamma = gamma
self.B = 1 # bound on rewards
def _update_optimistic_bound(
self,
bandit,
policy,
T,
timestep,
optimistic_bounds,
arms_to_update,
history_arms,
history_values,
B,
xi,
gamma,
):
discounted_count = [None] * len(bandit.arms)
sum_discounted_count = 0
for i in range(len(bandit.arms)):
discounted_count[i] = sum(
[
gamma ** (timestep - j - 1) if history_arms[j] == i else 0
for j in range(timestep)
]
)
sum_discounted_count += discounted_count[i]
for i in arms_to_update:
if discounted_count[i] < 1e-10:
optimistic_bounds[i] = float("inf")
else:
empirical_average = (
sum(
[
history_values[j] * gamma ** (timestep - j - 1)
if history_arms[j] == i
else 0
for j in range(timestep)
]
)
/ discounted_count[i]
)
exploration_bonus = B * np.sqrt(
xi * np.log(sum_discounted_count) / discounted_count[i]
)
optimistic_bounds[i] = empirical_average + exploration_bonus
def solve(self, bandit, T):
n_arms = len(bandit.arms)
# pull every arm once
n_init = 1
timestep = n_init * n_arms
policy = [n_init] * n_arms
history_arms = list(range(n_arms))
history_values = [bandit.arms[i](1) for i in history_arms]
optimistic_bounds = np.zeros(n_arms)
# -*- coding: iso-8859-15 -*-
#
# This software was written by <NAME> (<NAME>)
# Copyright <NAME>
# All rights reserved
# This software is licenced under a 3-clause BSD style license
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#Redistributions of source code must retain the above copyright notice,
#this list of conditions and the following disclaimer.
#
#Redistributions in binary form must reproduce the above copyright notice,
#this list of conditions and the following disclaimer in the documentation
#and/or other materials provided with the distribution.
#
#Neither the name of the University College London nor the names
#of the code contributors may be used to endorse or promote products
#derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
#OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
#WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
#ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# Developed by <NAME> (MSSL/UCL)
# uvotpy
# (c) 2009-2017, see Licence
from future.builtins import str
from future.builtins import input
from future.builtins import range
__version__ = '2.9.0 20171209'
import sys
import optparse
import numpy as np
import matplotlib.pyplot as plt
try:
from astropy.io import fits as pyfits
from astropy import wcs
except:
import pyfits
import re
import warnings
try:
import imagestats
except:
import stsci.imagestats as imagestats
import scipy
from scipy import interpolate
from scipy.ndimage import convolve
from scipy.signal import boxcar
from scipy.optimize import leastsq
from scipy.special import erf
from numpy import polyfit, polyval
'''
try:
#from uvotpy import uvotplot,uvotmisc,uvotwcs,rationalfit,mpfit,uvotio
import uvotplot
import uvotmisc
import uvotwcs
import rationalfit
import mpfit
import uvotio
except:
pass
'''
from uvotmisc import interpgrid, uvotrotvec, rdTab, rdList
from generate_USNOB1_cat import get_usnob1_cat
import datetime
import os
if __name__ != '__main__':
anchor_preset = list([None,None])
bg_pix_limits = list([-100,-70,70,100])
bg_lower_ = list([None,None]) # (offset, width) in pix, e.g., [20,30], default [50,50]
bg_upper_ = list([None,None]) # (offset, width) in pix, e.g., [20,30], default [50,50]
offsetlimit = None
#set Global parameters
status = 0
do_coi_correction = True # if not set, disable coi_correction
tempnames = list()
tempntags = list()
cval = -1.0123456789
interactive = True
update_curve = True
contour_on_img = False
give_result = False # with this set, a call to getSpec returns all data
give_new_result = False
use_rectext = False
background_method = 'boxcar' # alternatives 'splinefit' 'boxcar'
background_smoothing = [50,7] # 'boxcar' default smoothing in dispersion and across dispersion in pix
background_interpolation = 'linear'
trackcentroiding = True # default (= False will disable track y-centroiding)
global trackwidth
trackwidth = 2.5 # width of extraction region in sigma (alternative default = 1.0) 2.5 was used for flux calibration.
bluetrackwidth = 1.3 # multiplier width of non-order-overlapped extraction region [not yet active]
write_RMF = False
background_source_mag = 18.0
zeroth_blim_offset = 1.0
coi_half_width = None
slit_width = 200
_PROFILE_BACKGROUND_ = False # start with severe sigma-clip of background, before going to smoothing
today_ = datetime.date.today()
datestring = today_.isoformat()[0:4]+today_.isoformat()[5:7]+today_.isoformat()[8:10]
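# e.g., a run on 2017-12-09 (illustrative date) gives datestring = '20171209'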
fileversion=1
calmode=True
typeNone = type(None)
senscorr = True # do sensitivity correction
print(66*"=")
print("uvotpy module uvotgetspec version=",__version__)
print("<NAME> (c) 2009-2017, see uvotpy licence.")
print("please use reference provided at http://github.com/PaulKuin/uvotpy")
print(66*"=","\n")
def getSpec(RA,DEC,obsid, ext, indir='./', wr_outfile=True,
outfile=None, calfile=None, fluxcalfile=None,
use_lenticular_image=True,
offsetlimit=None, anchor_offset=None, anchor_position=[None,None],
background_lower=[None,None], background_upper=[None,None],
background_template=None,
fixed_angle=None, spextwidth=13, curved="update",
fit_second=False, predict2nd=True, skip_field_src=False,
optimal_extraction=False, catspec=None,write_RMF=write_RMF,
get_curve=None,fit_sigmas=True,get_sigma_poly=False,
lfilt1=None, lfilt1_ext=None, lfilt2=None, lfilt2_ext=None,
wheelpos=None, interactive=interactive, sumimage=None, set_maglimit=None,
plot_img=True, plot_raw=True, plot_spec=True, zoom=True, highlight=False,
uvotgraspcorr_on=True, ank_c_0offset = False,
update_pnt=True, ifmotion=False, motion_file=None, anchor_x_offset=False,
replace=None,ifextended=False, singleside_bkg = False, fixwidth = False,
clobber=False, chatter=1):
'''Makes all the necessary calls to reduce the data.
Parameters
----------
ra, dec : float
The Sky position (J2000) in **decimal degrees**
obsid : str
The observation ID number as a **String**. Typically that is
something like "00032331001" and should be part of your
grism filename which is something like "sw00032331001ugu_dt.img"
ext : int
number of the extension to process
kwargs : dict
optional keyword arguments, possible values are:
- **fit_second** : bool
fit the second order. Off since it sometimes causes problems when the
orders overlap completely. Useful for spectra in top part detector
- **background_lower** : list
instead of default background list offset from spectrum as list
of two numbers, like [20, 40]. Distance relative to spectrum
- **background_upper** : list
instead of default background list offset from spectrum as list
of two numbers, like [20, 40]. Distance relative to spectrum
- **offsetlimit** : None,int,[center,range]
Default behaviour is to determine automatically any required offset from
the predicted anchor position to the spectrum, and correct for that.
The automated method may fail in the case of a weak spectrum and strong zeroth
or first order next to the spectrum. Two methods are provided:
(1) provide a number which will be used to limit the allowed offset. If
within that limit no peak is identified, the program will stop and require
you to provide a manual offset value. Try small numbers like 1, -1, 3, etc..
(2) if you already know the approximate y-location of the spectrum at the
anchor x-position in the rotated small image strip around the spectrum, you
can give this with a small allowed range for fine tuning as a list of two
parameter values. The first value in the list must be the y-coordinate
(by default the spectrum falls close to y=100 pixels), the second parameter
the allowed adjustment to a peak value in pixels. For example, [105,2].
This will require no further interactive input, and the spectrum will be
extracted using that offset (see also the examples at the end of this docstring).
- **wheelpos**: {160,200,955,1000}
filter wheel position for the grism filter mode used. Helpful for
forcing Vgrism or UVgrism input when both are present in the directory.
160:UV Clocked, 200:UV Nominal, 955:V clocked, 1000:V nominal
- **zoom** : bool
when False, the whole extracted region is displayed, including zeroth
order when present.
- **clobber** : bool
When True, overwrite earlier output (see also outfile)
- **write_RMF** : bool
When True, write the rmf file (will take extra time due to large matrix operations)
- **use_lenticular_image** : bool
When True and a lenticular image is present, it is used. If False,
the grism image header WCS-S system will be used for the astrometry,
with an automatic call to uvotgraspcorr for refinement.
- **sumimage** : str
Name of a summed image generated using ``sum_Extimage()``; the spectrum will be
extracted from the summed image.
- **wr_outfile** : bool
If False, no output file is written
- **outfile** : path, str
Name of output file, other than automatically generated.
- **calfile** : path, str
calibration file name
- **fluxcalfile** : path, str
flux calibration file name or "CALDB" or None
- **predict2nd** : bool
predict the second order flux from the first. Overestimates in centre a lot.
- **skip_field_src** : bool
if True do not locate zeroth order positions. Can be used if the absence of an
internet connection or problems with the USNO-B1 server would cause failures.
- **optimal_extraction** : bool, obsolete
Do not use. Better results with the other implementation.
- **catspec** : path
optional full path to the catalog specification file for uvotgraspcorr.
- **get_curve** : bool or path
True: activate option to supply the curvature coefficients of all
orders by hand.
path: filename with coefficients of curvature
- **uvotgraspcorr_on** : bool
enable/disable rerun of uvotgraspcorr to update the WCS keywords
- **update_pnt** : bool
enable/disable update of the WCS keywords from the attitude file
(this is done prior to running uvotgraspcorr if that is enabled)
- **fit_sigmas** : bool
fit the sigma of trackwidths if True (not implemented, always on)
- **get_sigma_poly** : bool
option to supply the polynomial for the sigma (not implemented)
- **lfilt1**, **lfilt2** : str
name of the lenticular filter before and after the grism exposure
(now supplied by fileinfo())
- **lfilt1_ext**, **lfilt2_ext** : int
extension of the lenticular filter (now supplied by fileinfo())
- **plot_img** : bool
plot the first figure with the det image
- **plot_raw** : bool
plot the raw spectrum data
- **plot_spec** : bool
plot the flux spectrum
- **highlight** : bool
add contours to the plots to highlight contrasts
- **chatter** : int
verbosity of program
- **set_maglimit** : int
specify a magnitude limit to search for background sources in the USNO-B1 catalog
- **background_template** : numpy 2D array
User provides a background template that will be used instead of determining
the background. Must be in counts. Size and alignment must exactly match the
detector image.
Returns
-------
None, (give_result=True) compounded data (Y0, Y1, Y2, Y3, Y4) which
are explained in the code, or (give_new_result=True) a data dictionary.
Notes
-----
**Quick Start**
`getSpec(ra,dec,obsid, ext,)`
should produce plots and output files
**Which directory?**
The program needs to be started from the CORRECT data directory.
The attitude file [e.g., "sw<OBSID>pat.fits"] is needed!
A link or copy of the attitude file needs to be present in the directory
or "../../auxil/" directory as well.
**Global parameters**
These parameters can be reset, e.g., during a (i)python session, before calling getSpec.
- **trackwidth** : float
width spectral extraction in units of sigma. The default is trackwidth = 2.5
The alternative default is trackwidth = 1.0 which gives better results for
weak sources, or spectra with nearby contamination. However, the flux
calibration and coincidence-loss correction give currently inconsistent
results. When using trackwidth=1.0, rescale the flux to match trackwidth=2.5,
the value that was used for the flux calibration and coincidence-loss correction
(see the examples at the end of this docstring).
- **give_result** : bool
leave set to False; when True, a call to getSpec returns all the
intermediate results (see Returns).
When the extraction slit is set to be straight (``curved="straight"``), it cuts off the UV part of the
spectrum for spectra located in the top left and bottom right of the image.
History
-------
Version 2011-09-22 NPMK(MSSL) : handle case with no lenticular filter observation
Version 2012-01-15 NPMK(MSSL) : optimal extraction is no longer actively supported until further notice
Version 2013-10-23 NPMK(MSSL) : fixed bug so uvotgraspcorr gives same accuracy as lenticular filter
Version 2014-01-01 NPMK(MSSL) : aperture correction for background added; output dictionary
Version 2014-07-23 NPMK(MSSL) : coi-correction using new calibrated coi-box and factor
Version 2014-08-04 NPMK(MSSL/UCL): expanded offsetlimit parameter with list option to specify y-range.
Version 2015-12-03 NPMK(MSSL/UCL): change input parameter 'get_curve' to accept a file name with coefficients
Version 2016-01-16 NPMK(MSSL/UCL): added options for background; disable automated centroiding of spectrum
Example
-------
from uvotpy.uvotgetspec import getSpec
from uvotpy import uvotgetspec
import os, shutil
indir1 = os.getenv('UVOTPY') +'/test'
indir2 = os.getcwd()+'/test/UVGRISM/00055900056/uvot/image'
shutil.copytree(indir1, os.getcwd()+'/test' )
getSpec( 254.7129625, 34.3148667, '00055900056', 1, offsetlimit=1,indir=indir2, clobber=True )
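Further illustrative calls (hypothetical values, intended only to clarify the
offsetlimit, trackwidth and background keywords described above):
# method (1): limit the automatically determined offset to 3 pixels
getSpec( 254.7129625, 34.3148667, '00055900056', 1, indir=indir2, offsetlimit=3, clobber=True )
# method (2): force the spectrum near y=105 pix, allowing +/-2 pix of fine tuning
getSpec( 254.7129625, 34.3148667, '00055900056', 1, indir=indir2, offsetlimit=[105,2], clobber=True )
# narrower extraction for a weak source (rescale the resulting flux to trackwidth=2.5)
uvotgetspec.trackwidth = 1.0
getSpec( 254.7129625, 34.3148667, '00055900056', 1, indir=indir2, clobber=True,
background_lower=[20,40], background_upper=[20,40] )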
'''
# (specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile), (method), \
# (Xphi, Yphi, date1), (dist12, ankerimg, ZOpos), expmap, bgimg, bg_limits_used, bgextra = Y0
#
#( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
# (C_1,C_2,img), hdr,m1,m2,aa,wav1 ) = Y1
#
#fit,(coef0,coef1,coef2,coef3),(bg_zeroth,bg_first,bg_second,bg_third),(borderup,borderdown),apercorr,expospec=Y2
#
#counts, variance, borderup, borderdown, (fractions,cnts,vars,newsigmas) = Y3
#
#wav2p, dis2p, flux2p, qual2p, dist12p = Y4[0]
#
# where,
#
#(present0,present1,present2,present3),(q0,q1,q2,q3), \
# (y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),\
# (y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),\
# (x,xstart,xend,sp_all,quality,co_back) = fit
#
# dis = dispersion with zero at ~260nm[UV]/420nm[V] ; spnet = background-subtracted spectrum from 'spnetimg'
# angle = rotation-angle used to extract 'extimg' ; anker = first order anchor position in DET coordinates
# anker2 = second order anker X,Y position ; anker_field = Xphi,Yphi input angles with respect to reference
# ank_c = X,Y position of axis of rotation (anker) in 'extimg'
# bg = mean background, smoothed, with sources removed
# bg1 = one-sided background, sources removed, smoothed ; bg2 = same for background opposite side
# extimg = image extracted of source and background, 201 pixels wide, all orders.
# spimg = image centered on first order position ; spnetimg = background-subtracted 'spimg'
# offset = offset of spectrum from expected position based on 'anchor' at 260nm[UVG]/420nm[VG], first order
# C_1 = dispersion coefficients [python] first order; C_2 = same for second order
# img = original image ;
# WC_lines positions for selected WC star lines ; hdr = header for image
# m1,m2 = index limits spectrum ; aa = indices spectrum (e.g., dis[aa])
# wav1 = wavelengths for dis[aa] first order (combine with spnet[aa])
#
# when wr_outfile=True the program produces a flux calibrated output file by calling uvotio.
# [fails if output file is already present and clobber=False]
#
# The background must be consistent with the width of the spectrum summed.
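# Illustrative (hypothetical) use of the compounded output: with the module global
# give_result set to True, a caller could unpack the tuples documented above, e.g.,
#   Y0, Y1, Y2, Y3, Y4 = getSpec(ra, dec, obsid, ext)
#   (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset), \
#      (C_1,C_2,img), hdr, m1, m2, aa, wav1 = Y1
#   wave_first = np.polyval(C_1, dis[aa])   # first order wavelengths for spnet[aa]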
from uvotio import fileinfo, rate2flux, readFluxCalFile
from uvotplot import plot_ellipsoid_regions
if (type(RA) == np.ndarray) | (type(DEC) == np.ndarray):
raise IOError("RA, and DEC arguments must be of float type ")
if type(offsetlimit) == list:
if len(offsetlimit) != 2:
raise IOError("offsetlimit list must be [center, distance from center] in pixels")
get_curve_filename = None
a_str_type = type(curved)
if chatter > 4 :
print ("\n*****\na_str_type = ",a_str_type)
print ("value of get_curve = ",get_curve)
print ("type of parameter get_curve is %s\n"%(type(get_curve)) )
print ("type curved = ",type(curved))
if type(get_curve) == a_str_type:
# file name: check this file is present
if os.access(get_curve,os.F_OK):
get_curve_filename = get_curve
get_curve = True
else:
raise IOError(
"ERROR: get_curve *%s* is not a boolean value nor the name of a file that is on the disk."
%(get_curve) )
elif type(get_curve) == bool:
if get_curve:
get_curve_filename = None
print("requires input of curvature coefficients")
elif type(get_curve) == type(None):
get_curve = False
else:
raise IOError("parameter get_curve should by type str or bool, but is %s"%(type(get_curve)))
# check environment
CALDB = os.getenv('CALDB')
if (CALDB == '') or (CALDB is None):
print('WARNING: The CALDB environment variable has not been set')
HEADAS = os.getenv('HEADAS')
if (HEADAS == '') or (HEADAS is None):
print('WARNING: The HEADAS environment variable has not been set')
print('That is needed for the calls to uvot Ftools ')
#SCAT_PRESENT = os.system('which scat > /dev/null')
#if SCAT_PRESENT != 0:
# print('WARNING: cannot locate the scat program \nDid you install WCSTOOLS ?\n')
SESAME_PRESENT = os.system('which sesame > /dev/null')
#if SESAME_PRESENT != 0:
# print 'WARNING: cannot locate the sesame program \nDid you install the cdsclient tools?\n'
# fix some parameters
framtime = 0.0110329 # all grism images are taken in unbinned mode
splineorder=3
getzmxmode='spline'
smooth=50
testparam=None
msg = "" ; msg2 = "" ; msg4 = ""
attime = datetime.datetime.now()
logfile = 'uvotgrism_'+obsid+'_'+str(ext)+'_'+'_'+attime.isoformat()[0:19]+'.log'
if type(fluxcalfile) == bool: fluxcalfile = None
tempnames.append(logfile)
tempntags.append('logfile')
tempnames.append('rectext_spectrum.img')
tempntags.append('rectext')
lfiltnames=np.array(['uvw2','uvm2','uvw1','u','b','v','wh'])
ext_names =np.array(['uw2','um2','uw1','uuu','ubb','uvv','uwh'])
filestub = 'sw'+obsid
histry = ""
for x in sys.argv: histry += x + " "
Y0 = None
Y2 = None
Y3 = None
Y4 = None
Yfit = {}
Yout = {"coi_level":None} # output dictionary (2014-01-01; replace Y0,Y1,Y2,Y3)
lfilt1_aspcorr = "not initialized"
lfilt2_aspcorr = "not initialized"
qflag = quality_flags()
ZOpos = None
# parameters getSpec()
Yout.update({'indir':indir,'obsid':obsid,'ext':ext})
Yout.update({'ra':RA,'dec':DEC,'wheelpos':wheelpos})
if type(sumimage) == typeNone:
if background_template is not None:
# convert background_template to a dictionary
background_template = {'template':np.asarray(background_template),
'sumimg':False}
try:
ext = int(ext)
except:
print("fatal error in extension number: must be an integer value")
# locate related lenticular images
specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile = \
fileinfo(filestub,ext,directory=indir,wheelpos=wheelpos,chatter=chatter)
# set some flags and variables
lfiltinput = (lfilt1 != None) ^ (lfilt2 != None)
lfiltpresent = lfiltinput | (lfilt1_ != None) | (lfilt2_ != None)
if (type(lfilt1_) == typeNone) & (type(lfilt2_) == typeNone):
# ensure the output is consistent with no lenticular filter solution
use_lenticular_image = False
# translate
filt_id = {"wh":"wh","v":"vv","b":"bb","u":"uu","uvw1":"w1","uvm2":"m2","uvw2":"w2"}
lfiltflag = False
if ((type(lfilt1) == typeNone)&(type(lfilt1_) != typeNone)):
lfilt1 = lfilt1_
lfilt1_ext = lfilt1_ext_
if chatter > 0: print("lenticular filter 1 from search lenticular images: "+lfilt1+"+"+str(lfilt1_ext))
lfiltflag = True
lfilt1_aspcorr = None
try:
hdu_1 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt1]+"_sk.img",lfilt1_ext)
lfilt1_aspcorr = hdu_1["ASPCORR"]
except:
hdu_1 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt1]+"_sk.img.gz",lfilt1_ext)
lfilt1_aspcorr = hdu_1["ASPCORR"]
if ((type(lfilt2) == typeNone)&(type(lfilt2_) != typeNone)):
lfilt2 = lfilt2_
lfilt2_ext = lfilt2_ext_
if chatter > 0: print("lenticular filter 2 from search lenticular images: "+lfilt2+"+"+str(lfilt2_ext))
lfiltflag = True
lfilt2_aspcorr = None
try:
hdu_2 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt2]+"_sk.img",lfilt2_ext)
lfilt2_aspcorr = hdu_2["ASPCORR"]
except:
hdu_2 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt2]+"_sk.img.gz",lfilt2_ext)
lfilt2_aspcorr = hdu_2["ASPCORR"]
# report
if chatter > 4:
msg2 += "getSpec: image parameter values\n"
msg2 += "ra, dec = (%6.1f,%6.1f)\n" % (RA,DEC)
msg2 += "filestub, extension = %s[%i]\n"% (filestub, ext)
if lfiltpresent & use_lenticular_image:
msg2 += "first/only lenticular filter = "+lfilt1+" extension first filter = "+str(lfilt1_ext)+'\n'
msg2 += " Aspect correction keyword : %s\n"%(lfilt1_aspcorr)
if lfilt2_ext != None:
msg2 += "second lenticular filter = "+lfilt2+" extension second filter = "+str(lfilt2_ext)+'\n'
msg2 += " Aspect correction keyword : %s\n"%(lfilt2_aspcorr)
if not use_lenticular_image:
msg2 += "anchor position derived without lenticular filter\n"
msg2 += "spectrum extraction preset width = "+str(spextwidth)+'\n'
#msg2 += "optimal extraction "+str(optimal_extraction)+'\n'
hdr = pyfits.getheader(specfile,int(ext))
if chatter > -1:
msg += '\nuvotgetspec version : '+__version__+'\n'
msg += ' Position RA,DEC : '+str(RA)+' '+str(DEC)+'\n'
msg += ' Start date-time : '+str(hdr['date-obs'])+'\n'
msg += ' grism file : '+specfile.split('/')[-1]+'['+str(ext)+']\n'
msg += ' attitude file : '+attfile.split('/')[-1]+'\n'
if lfiltpresent & use_lenticular_image:
if ((lfilt1 != None) & (lfilt1_ext != None)):
msg += ' lenticular file 1: '+lfilt1+'['+str(lfilt1_ext)+']\n'
msg += ' aspcorr: '+lfilt1_aspcorr+'\n'
if ((lfilt2 != None) & (lfilt2_ext != None)):
msg += ' lenticular file 2: '+lfilt2+'['+str(lfilt2_ext)+']\n'
msg += ' aspcorr: '+lfilt2_aspcorr+'\n'
if not use_lenticular_image:
msg += "anchor position derived without lenticular filter\n"
if not 'ASPCORR' in hdr: hdr['ASPCORR'] = 'UNKNOWN'
Yout.update({'hdr':hdr})
tstart = hdr['TSTART']
tstop = hdr['TSTOP']
wheelpos = hdr['WHEELPOS']
expo = hdr['EXPOSURE']
expmap = [hdr['EXPOSURE']]
Yout.update({'wheelpos':wheelpos})
if 'FRAMTIME' not in hdr:
# compute the frametime from the CCD deadtime and deadtime fraction
#deadc = hdr['deadc']
#deadtime = 600*285*1e-9 # 600ns x 285 CCD lines seconds
#framtime = deadtime/(1.0-deadc)
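# e.g. (illustrative numbers): deadtime = 600e-9*285 = 1.71e-4 s and a deadtime
# fraction deadc ~ 0.9845 give 1.71e-4/(1-0.9845) ~ 0.011 s, consistent with the
# fixed value adopted below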
framtime = 0.0110329
hdr.update('framtime',framtime,comment='frame time computed from deadc ')
Yout.update({'hdr':hdr})
if chatter > 1:
print("frame time computed from deadc - added to hdr")
print("with a value of ",hdr['framtime']," ",Yout['hdr']['framtime'])
if not 'detnam' in hdr:
hdr.update('detnam',str(hdr['wheelpos']))
msg += ' exposuretime : %7.1f \n'%(expo)
maxcounts = 1.1 * expo/framtime
if chatter > 0:
msg += ' wheel position : '+str(wheelpos)+'\n'
msg += ' roll angle : %5.1f\n'% (hdr['pa_pnt'])
msg += 'coincidence loss version: 2 (2014-07-23)\n'
msg += '======================================\n'
try:
if ( (np.abs(RA - hdr['RA_OBJ']) > 0.4) | (np.abs(DEC - hdr['DEC_OBJ']) > 0.4) ):
sys.stderr.write("\nWARNING: It looks like the input RA,DEC and target position in header are different fields\n")
except (RuntimeError, TypeError, NameError, KeyError):
pass
msg2 += " cannot read target position from header for verification\n"
if lfiltinput:
# the lenticular filter(s) were specified on the command line.
# check that the lenticular image and grism image are close enough in time.
if type(lfilt1_ext) == typeNone:
lfilt1_ext = int(ext)
lpos = np.where( np.array([lfilt1]) == lfiltnames )
if len(lpos[0]) < 1: sys.stderr.write("WARNING: illegal name for the lenticular filter\n")
lnam = ext_names[lpos]
lfile1 = filestub+lnam[0]+'_sk.img'
hdr_l1 = pyfits.getheader(lfile1,lfilt1_ext)
tstart1 = hdr_l1['TSTART']
tstop1 = hdr_l1['TSTOP']
if not ( (np.abs(tstart-tstop1) < 20) ^ (np.abs(tstart1-tstop) < 20) ):
sys.stderr.write("WARNING: check that "+lfile1+" matches the grism image\n")
if lfilt2 != None:
if type(lfilt2_ext) == typeNone:
lfilt2_ext = lfilt1_ext+1
lpos = np.where( np.array([lfilt2]) == lfiltnames )
if len(lpos[0]) < 1: sys.stderr.write("WARNING: illegal name for the lenticular filter\n")
lnam = ext_names[lpos]
lfile2 = filestub+lnam[0]+'_sk.img'
hdr_l2 = pyfits.getheader(lfile2,lfilt2_ext)
tstart2 = hdr_l2['TSTART']
tstop2 = hdr_l2['TSTOP']
if not ( (np.abs(tstart-tstop2) < 20) ^ (np.abs(tstart2-tstop) < 20) ):
sys.stderr.write("WARNING: check that "+lfile2+" matches the grism image\n")
if (not lfiltpresent) | (not use_lenticular_image):
method = "grism_only"
else:
method = None
if not senscorr: msg += "WARNING: No correction for sensitivity degradation applied.\n"
# get the USNO-B1 catalog data for the field, & find the zeroth orders
if (not skip_field_src):
if chatter > 2: print("============== locate zeroth orders due to field sources =============")
if wheelpos > 500: zeroth_blim_offset = 2.5
ZOpos = find_zeroth_orders(filestub, ext, wheelpos,indir=indir,
set_maglimit=set_maglimit,clobber="yes", chatter=chatter, )
# use for the ftools the downloaded usnob1 catalog in file "search.ub1" using the
# catspec parameter in the calls
if os.access('catalog.spec',os.F_OK) & (catspec == None):
catspec= 'catalog.spec'
# retrieve the input angle relative to the boresight
Xphi, Yphi, date1, msg3, lenticular_anchors = findInputAngle( RA, DEC, filestub, ext,
uvotgraspcorr_on=uvotgraspcorr_on, update_pnt=update_pnt, msg="", \
wheelpos=wheelpos, lfilter=lfilt1, lfilter_ext=lfilt1_ext, \
lfilt2=lfilt2, lfilt2_ext=lfilt2_ext, method=method, \
attfile=attfile, catspec=catspec, indir=indir, chatter=chatter)
Yout.update({"Xphi":Xphi,"Yphi":Yphi})
Yout.update({'lenticular_anchors':lenticular_anchors})
# read the anchor and dispersion out of the wavecal file
anker, anker2, C_1, C_2, angle, calibdat, msg4 = getCalData(Xphi,Yphi,wheelpos, date1, \
calfile=calfile, chatter=chatter)
hdrr = pyfits.getheader(specfile,int(ext))
if (hdrr['aspcorr'] == 'UNKNOWN') & (not lfiltpresent):
msg += "WARNING: No aspect solution found. Anchor uncertainty large.\n"
msg += "first order anchor position on detector in det coordinates:\n"
msg += "anchor1=(%8.2f,%8.2f)\n" % (anker[0],anker[1])
msg += "first order dispersion polynomial (distance anchor, \n"
msg += " highest term first)\n"
for k in range(len(C_1)):
msg += "DISP1_"+str(k)+"=%12.4e\n" % (C_1[k])
msg += "second order anchor position on detector in det coordinates:\n"
msg += "anchor2=(%8.2f,%8.2f)\n" % (anker2[0],anker2[1])
msg += "second order dispersion polynomial (distance anchor2,\n"
msg += " highest term first)\n"
for k in range(len(C_2)):
msg += "DISP2_"+str(k)+"=%12.4e\n" % (C_2[k])
#sys.stderr.write( "first order anchor = %s\n"%(anker))
#sys.stderr.write( "second order anchor = %s\n"%(anker2))
msg += "first order dispersion = %s\n"%(str(C_1))
msg += "second order dispersion = %s\n"%(str(C_2))
if chatter > 1:
sys.stderr.write( "first order dispersion = %s\n"%(str(C_1)) )
sys.stderr.write( "second order dispersion = %s\n"%(str(C_2)) )
msg += "lenticular filter anchor positions (det)\n"
msg += msg3
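# note (illustrative): C_1 and C_2 are numpy.polyval coefficient arrays (highest
# power first) mapping pixel distance d from the anchor to wavelength; e.g. with
# hypothetical coefficients C_1 = [4.1e-10,-1.1e-6,3.2e-3,3.16,2600.],
# np.polyval(C_1, 0.) gives ~2600 A at the first-order anchor.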
# override angle
if fixed_angle != None:
msg += "WARNING: overriding calibration file angle for extracting \n\t"\
"spectrum cal: "+str(angle)+'->'+str(fixed_angle)+" \n"
angle = fixed_angle
# override anchor position in det pixel coordinates
if anchor_position[0] != None:
cal_anker = anker
anker = np.array(anchor_position)
msg += "overriding anchor position with value [%8.1f,%8.1f]\n" % (anker[0],anker[1])
anker2 = anker2 -cal_anker + anker
msg += "overriding anchor position 2nd order with value [%8.1f,%8.1f]\n"%(anker2[0],anker2[1])
anker_field = np.array([Xphi,Yphi])
theta=np.zeros(5)+angle # use the angle from first order everywhere.
C_0 = np.zeros(3) # not in calibration file. Use uvotcal/zemax to get.
C_3 = np.zeros(3)
Cmin1 = np.zeros(3)
msg += "field coordinates:\n"
msg += "FIELD=(%9.4f,%9.4f)\n" % (Xphi,Yphi)
# order distance between anchors
dist12 = np.sqrt( (anker[0]-anker2[0])**2 + (anker[1]-anker2[1])**2 )
msg += "order distance 1st-2nd anchors :\n"
msg += "DIST12=%7.1f\n" % (dist12)
Yout.update({"anker":anker,"anker2":anker2,"C_1":C_1,"C_2":C_2,"theta":angle,"dist12":dist12})
# determine x,y locations of certain wavelengths on the image
# TBD: add curvature
if wheelpos < 500:
wavpnt = np.arange(1700,6800,slit_width)
else:
wavpnt = np.arange(2500,6600,slit_width)
dispnt=pixdisFromWave(C_1,wavpnt) # pixel distance to anchor
if chatter > 0: msg2 += 'first order angle at anchor point: = %7.1f\n'%(angle)
crpix = crpix1,crpix2 = hdr['crpix1'],hdr['crpix2']
crpix = np.array(crpix) # centre of image
ankerimg = anker - np.array([1100.5,1100.5])+crpix
xpnt = ankerimg[0] + dispnt*np.cos((180-angle)*np.pi/180)
ypnt = ankerimg[1] + dispnt*np.sin((180-angle)*np.pi/180)
msg += "1st order anchor on image at (%7.1f,%7.1f)\n"%(ankerimg[0],ankerimg[1])
if chatter > 4: msg += "Found anchor point; now extracting spectrum.\n"
if chatter > 2: print("==========Found anchor point; now extracting spectrum ========")
if type(offsetlimit) == typeNone:
if wheelpos > 300:
offsetlimit = 9
sys.stdout.write("automatically set the value for the offsetlimit = "+str(offsetlimit)+'\n')
# find position zeroth order on detector from WCS-S after update from uvotwcs
#if 'hdr' not in Yout:
# hdr = pyfits.getheader(specfile,int(ext))
# Yout.update({'hdr':hdr})
zero_xy_imgpos = [-1,-1]
if chatter > 1: print("zeroth order position on image...")
try:
wS =wcs.WCS(header=hdr,key='S',relax=True,)
zero_xy_imgpos = wS.wcs_world2pix([[RA,DEC]],0)
print("position not corrected for SIP = ", zero_xy_imgpos[0][0],zero_xy_imgpos[0][1])
zero_xy_imgpos = wS.sip_pix2foc(zero_xy_imgpos, 0)[0]
if chatter > 1:
print("zeroth order position on image:",zero_xy_imgpos)
except:
pass
Yout.update({'zeroxy_imgpos':zero_xy_imgpos})
# provide some checks on background inputs:
if background_lower[0] != None:
background_lower = np.abs(background_lower)
if np.sum(background_lower) >= (slit_width-10):
background_lower = [None,None]
msg += "WARNING: background_lower set too close to edge image\n Using default\n"
if background_upper[0] != None:
background_upper = np.abs(background_upper)
if np.sum(background_upper) >= (slit_width-10):
background_upper = [None,None]
msg += "WARNING: background_upper set too close to edge image\n Using default\n"
# in case of summary file:
if (not skip_field_src) & (ZOpos == None):
if chatter > 2: print("DEBUG 802 ================== locate zeroth orders due to field sources =============")
if wheelpos > 500: zeroth_blim_offset = 2.5
try:
ZOpos = find_zeroth_orders(filestub, ext, wheelpos,indir=indir,
set_maglimit=set_maglimit,clobber="yes", chatter=chatter, )
except:
if type(sumimage) == typeNone:
print ("exception to call find_zeroth_orders : skip_field_src = ",skip_field_src)
pass
# use for the ftools the downloaded usnob1 catalog in file "search.ub1" using the
# catspec parameter in the calls
if os.access('catalog.spec',os.F_OK) & (catspec == None):
catspec= 'catalog.spec'
if (not skip_field_src):
Xim,Yim,Xa,Yb,Thet,b2mag,matched,ondetector = ZOpos
pivot_ori=np.array([(ankerimg)[0],(ankerimg)[1]])
Y_ZOpos={"Xim":Xim,"Yim":Yim,"Xa":Xa,"Yb":Yb,"Thet":Thet,"b2mag":b2mag,
"matched":matched,"ondetector":ondetector}
Yout.update({"ZOpos":Y_ZOpos})
else:
Yout.update({"ZOpos":None})
# find background, extract straight slit spectrum
if chatter > 3 : print ("DEBUG 827 compute background")
if sumimage != None:
# initialize parameters for extraction summed extracted image
print('reading summed image file : '+sumimage)
print('ext label for output file is set to : ', ext)
Y6 = sum_Extimage (None, sum_file_name=sumimage, mode='read')
extimg, expmap, exposure, wheelpos, C_1, C_2, dist12, anker, \
(coef0, coef1,coef2,coef3,sig0coef,sig1coef,sig2coef,sig3coef), hdr = Y6
if background_template != None:
background_template = {'extimg': background_template,
'sumimg': True}
if (background_template['extimg'].size != extimg.size):
print("ERROR")
print("background_template.size=",background_template['extimg'].size)
print("extimg.size=",extimg.size)
raise IOError("The template does not match the sumimage dimensions")
msg += "order distance 1st-2nd anchors :\n"
msg += "DIST12=%7.1f\n" % (dist12)
for k in range(len(C_1)):
msg += "DISP1_"+str(k)+"=%12.4e\n" % (C_1[k])
msg += "second order dispersion polynomial (distance anchor2,\n"
msg += " highest term first)\n"
for k in range(len(C_2)):
msg += "DISP2_"+str(k)+"=%12.4e\n" % (C_2[k])
print("first order anchor = ",anker)
print("first order dispersion = %s"%(str(C_1)))
print("second order dispersion = %s"%(str(C_2)))
tstart = hdr['tstart']
ank_c = [100,500,0,2000]
if type(offsetlimit) == typeNone:
offset = 0
elif type(offsetlimit) == list:
offset = offsetlimit[0]-96
ank_c[0] = offsetlimit[0]
else:
offset = offsetlimit # for sumimage used offsetlimit to set the offset
ank_c[0] = 96+offsetlimit
dis = np.arange(-500,1500)
img = extimg
# get background
bg, bg1, bg2, bgsig, bgimg, bg_limits_used, bgextra = findBackground(extimg,
background_lower=background_lower,
background_upper=background_upper,)
if singleside_bkg == 'bg1':
bg2 = bg1
elif singleside_bkg == 'bg2':
bg1 = bg2
else:
pass
skip_field_src = True
spnet = bg1 # placeholder
expo = exposure
maxcounts = exposure/0.01
anker2 = anker + [dist12,0]
spimg,spnetimg,anker_field = None, None, (0.,0.)
m1,m2,aa,wav1 = None,None,None,None
if type(outfile) == typeNone:
outfile='sum_image_'
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef} )
Yout.update({"anker":anker,"anker2":None,
"C_1":C_1,"C_2":C_2,
"Xphi":0.0,"Yphi":0.0,
"wheelpos":wheelpos,"dist12":dist12,
"hdr":hdr,"offset":offset})
Yout.update({"background_1":bg1,"background_2":bg2})
dropout_mask = None
Yout.update({"zeroxy_imgpos":[1000,1000]})
else:
# default extraction
if chatter > 2 : print ("DEBUG 894 default extraction")
# start with a quick straight slit extraction
exSpIm = extractSpecImg(specfile,ext,ankerimg,angle,spwid=spextwidth,
background_lower=background_lower, background_upper=background_upper,
template = background_template, x_offset = anchor_x_offset, ank_c_0offset=ank_c_0offset,
offsetlimit=offsetlimit, replace=replace, chatter=chatter, singleside_bkg=singleside_bkg)
dis = exSpIm['dis']
spnet = exSpIm['spnet']
bg = exSpIm['bg']
bg1 = exSpIm['bg1']
bg2 = exSpIm['bg2']
bgsig = exSpIm['bgsigma']
bgimg = exSpIm['bgimg']
bg_limits_used = exSpIm['bg_limits_used']
bgextra = exSpIm['bgextras']
extimg = exSpIm['extimg']
spimg = exSpIm['spimg']
spnetimg = exSpIm['spnetimg']
offset = exSpIm['offset']
ank_c = exSpIm['ank_c']
if background_template != None:
background_template ={"extimg":exSpIm["template_extimg"]}
Yout.update({"template":exSpIm["template_extimg"]})
if exSpIm['dropouts']:
dropout_mask = exSpIm['dropout_mask']
else: dropout_mask = None
Yout.update({"background_1":bg1,"background_2":bg2})
#msg += "1st order anchor offset from spectrum = %7.1f\n"%(offset)
#msg += "anchor position in rotated extracted spectrum (%6.1f,%6.1f)\n"%(ank_c[1],ank_c[0])
calibdat = None # free the memory
if chatter > 2: print("============ straight slit extraction complete =================")
if np.max(spnet) < maxcounts: maxcounts = 2.0*np.max(spnet)
# initial limits spectrum (pixels)
m1 = ank_c[1]-400
if wheelpos > 500: m1 = ank_c[1]-370
if m1 < 0: m1 = 0
if m1 < (ank_c[2]+30): m1 = ank_c[2]+30
m2 = ank_c[1]+2000
if wheelpos > 500: m2 = ank_c[1]+1000
if m2 >= len(dis): m2 = len(dis)-2
if m2 > (ank_c[3]-40): m2=(ank_c[3]-40)
aa = list(range(int(m1),int(m2)))
wav1 = polyval(C_1,dis[aa])
# get grism det image
img = pyfits.getdata(specfile, ext)
if isinstance(replace,np.ndarray):
img = replace
try:
offset = np.asscalar(offset)
except:
pass
Yout.update({"offset":offset})
Zbg = bg, bg1, bg2, bgsig, bgimg, bg_limits_used, bgextra
net = extimg-bgextra[-1]
var = extimg.copy()
dims = np.asarray( img.shape )
dims = np.array([dims[1],dims[0]])
dims2 = np.asarray(extimg.shape)
dims2 = np.array([dims2[1],dims2[0]])
msg += "Lower background from y = %i pix\nLower background to y = %i pix\n" % (bg_limits_used[0],bg_limits_used[1])
msg += "Upper background from y = %i pix\nUpper background to y = %i pix\n" % (bg_limits_used[2],bg_limits_used[3])
msg += "TRACKWID =%4.1f\n" % (trackwidth)
# collect some results:
if sumimage == None:
Y0 = (specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile), (method), \
(Xphi, Yphi, date1), (dist12, ankerimg, ZOpos), expmap, bgimg, bg_limits_used, bgextra
else:
Y0 = None, None, None, (dist12, None, None), expmap, bgimg, bg_limits_used, bgextra
angle = 0.0
# curvature from input (TBD how - placeholder with raw_input)
# choose input coef or pick from plot
# choose order to do it for
if (get_curve & interactive) | (get_curve & (get_curve_filename != None)):
if chatter > 3 : print ("DEBUG 978 get user-provided curve coefficients and extract spectrum")
spextwidth = None
# grab coefficients
poly_1 = None
poly_2 = None
poly_3 = None
if get_curve_filename == None:
try:
poly_1 = eval(input("give coefficients of first order polynomial array( [X^3,X^2,X,C] )"))
poly_2 = eval(input("give coefficients of second order polynomial array( [X^2,X,C] )"))
poly_3 = eval(input("give coefficients of third order polynomial array( [X,C] )"))
except:
print("failed")
if (type(poly_1) != list) | (type(poly_2) != list) | (type(poly_3) != list):
print("poly_1 type = ",type(poly_1))
print("poly_2 type = ",type(poly_2))
print("poly_3 type = ",type(poly_3))
raise IOError("the coefficients must be a list")
poly_1 = np.asarray(poly_1)
poly_2 = np.asarray(poly_2)
poly_3 = np.asarray(poly_3)
else:
try:
curfile = rdList(get_curve_filename)
poly_1 = np.array(curfile[0][0].split(','),dtype=float)
poly_2 = np.array(curfile[1][0].split(','),dtype=float)
poly_3 = np.array(curfile[2][0].split(','),dtype=float)
except:
print("There seems to be a problem when reading the coefficients out of the file")
print("The format is a list of coefficients separated by commas, highest order first")
print("The first line for the first order")
print("The second line for the second order")
print("The third line for the third order")
print("like, \n1.233e-10,-7.1e-7,3.01e-3,0.0\n1.233e-5,-2.3e-2,0.03\n1.7e-1,0.9\n")
print(get_curve_filename)
print(curfile)
print(poly_1)
print(poly_2)
print(poly_3)
raise IOError("ERROR whilst reading curvature polynomial from file\n")
print("Curvature coefficients were read in...\npoly_1: %s \npoly_2: %s \npoly_3: %s \n"%
(poly_1,poly_2,poly_3))
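# Illustrative curvature file accepted here (hypothetical coefficients; one line
# per order, comma-separated, highest power first):
#   1.2e-10,-7.1e-7,3.0e-3,0.0
#   1.2e-5,-2.3e-2,0.03
#   1.7e-1,0.9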
fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,\
bg_second,bg_third), (borderup,borderdown), apercorr, expospec, msg, curved \
= curved_extraction(
extimg, ank_c, anker, wheelpos,
ZOpos=ZOpos, skip_field_sources=skip_field_src,
offsetlimit=offsetlimit,
predict_second_order=predict2nd,
background_template=background_template,
angle=angle, offset=offset,
poly_1=poly_1, poly_2=poly_2, poly_3=poly_3,
msg=msg, curved=curved,
outfull=True, expmap=expmap,
fit_second=fit_second,
fit_third=fit_second,
C_1=C_1,C_2=C_2,dist12=dist12,
dropout_mask=dropout_mask, ifmotion=ifmotion,
obsid=obsid,indir=indir,motion_file=motion_file,
ank_c_0offset=ank_c_0offset,
chatter=chatter,ifextended=ifextended,
fixwidth=fixwidth)
# fit_sigmas parameter needs passing
(present0,present1,present2,present3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),(
x,xstart,xend,sp_all,quality,co_back) = fitorder
# update the anchor y-coordinate
if chatter > 3 : print ("DEBUG 1048 update anchor coordinate\noriginal ank_c=%s\ny1=%s"%(ank_c,y1))
ank_c[0] = y1[np.int(ank_c[1])]
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"bg_zeroth":bg_zeroth,"bg_first":bg_first,"bg_second":bg_second,"bg_third":bg_third,
"borderup":borderup,"borderdown":borderdown,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef,
"present0":present0,"present1":present1,"present2":present2,"present3":present3,
"q0":q0,"q1":q1,"q2":q2,"q3":q3,
"y0":y0,"dlim0L":dlim0L,"dlim0U":dlim0U,"sp_zeroth":sp_zeroth,"bg_zeroth":bg_zeroth,"co_zeroth":co_zeroth,
"y1":y1,"dlim1L":dlim1L,"dlim1U":dlim1U,"sp_first": sp_first, "bg_first": bg_first, "co_first": co_first,
"y2":y2,"dlim2L":dlim2L,"dlim2U":dlim2U,"sp_second":sp_second,"bg_second":bg_second,"co_second":co_second,
"y3":y3,"dlim3L":dlim3L,"dlim3U":dlim3U,"sp_third": sp_third, "bg_third": bg_third, "co_third":co_third,
"x":x,"xstart":xstart,"xend":xend,"sp_all":sp_all,"quality":quality,"co_back":co_back,
"apercorr":apercorr,"expospec":expospec})
Yout.update({"ank_c":ank_c,"extimg":extimg,"expmap":expmap})
# curvature from calibration
if spextwidth != None:
if chatter > 3 : print ("DEBUG 1067 get curve coefficients from cal file and extract spectrum ")
fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,\
bg_second,bg_third), (borderup,borderdown) , apercorr, expospec, msg, curved \
= curved_extraction(
extimg,ank_c,anker, wheelpos,
ZOpos=ZOpos, skip_field_sources=skip_field_src,
offsetlimit=offsetlimit,
background_lower=background_lower,
background_upper=background_upper, \
background_template=background_template,\
angle=angle, offset=offset,
outfull=True, expmap=expmap,
msg = msg, curved=curved,
fit_second=fit_second,
fit_third=fit_second, C_1=C_1,C_2=C_2,dist12=dist12,
dropout_mask=dropout_mask, ifmotion=ifmotion,
obsid=obsid,indir=indir,motion_file=motion_file,
ank_c_0offset=ank_c_0offset,
chatter=chatter,ifextended=ifextended,
fixwidth=fixwidth)
(present0,present1,present2,present3),(q0,q1,q2,q3), \
(y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),\
(y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),\
(x,xstart,xend,sp_all,quality,co_back) = fitorder
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"bg_zeroth":bg_zeroth,"bg_first":bg_first,"bg_second":bg_second,"bg_third":bg_third,
"borderup":borderup,"borderdown":borderdown,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef,
"present0":present0,"present1":present1,"present2":present2,"present3":present3,
"q0":q0,"q1":q1,"q2":q2,"q3":q3,
"y0":y0,"dlim0L":dlim0L,"dlim0U":dlim0U,"sp_zeroth":sp_zeroth,"bg_zeroth":bg_zeroth,"co_zeroth":co_zeroth,
"y1":y1,"dlim1L":dlim1L,"dlim1U":dlim1U,"sp_first": sp_first, "bg_first": bg_first, "co_first": co_first,
"y2":y2,"dlim2L":dlim2L,"dlim2U":dlim2U,"sp_second":sp_second,"bg_second":bg_second,"co_second":co_second,
"y3":y3,"dlim3L":dlim3L,"dlim3U":dlim3U,"sp_third": sp_third, "bg_third": bg_third, "co_third":co_third,
"x":x,"xstart":xstart,"xend":xend,"sp_all":sp_all,"quality":quality,"co_back":co_back,
"apercorr":apercorr,"expospec":expospec})
ank_c[0] = y1[int(ank_c[1])]
Yout.update({"ank_c":ank_c,"extimg":extimg,"expmap":expmap})
msg += "orders present:"
if present0: msg += "0th order, "
if present1: msg += "first order"
if present2: msg += ", second order"
if present3: msg += ", third order "
print('1224 CCCCCCCCCCCCC', coef1)
print(RA,DEC)
print(anker)
print(ank_c)
msg += '\nparametrized order curvature:\n'
if present0:
for k in range(len(coef0)):
msg += "COEF0_"+str(k)+"=%12.4e\n" % (coef0[k])
if present1:
for k in range(len(coef1)):
msg += "COEF1_"+str(k)+"=%12.4e\n" % (coef1[k])
if present2:
for k in range(len(coef2)):
msg += "COEF2_"+str(k)+"=%12.4e\n" % (coef2[k])
if present3:
for k in range(len(coef3)):
msg += "COEF3_"+str(k)+"=%12.4e\n" % (coef3[k])
msg += '\nparametrized width slit:\n'
if present0:
for k in range(len(sig0coef)):
msg += "SIGCOEF0_"+str(k)+"=%12.4e\n" % (sig0coef[k])
if present1:
for k in range(len(sig1coef)):
msg += "SIGCOEF1_"+str(k)+"=%12.4e\n" % (sig1coef[k])
if present2:
for k in range(len(sig2coef)):
msg += "SIGCOEF2_"+str(k)+"=%12.4e\n" % (sig2coef[k])
if present3:
for k in range(len(sig3coef)):
msg += "SIGCOEF3_"+str(k)+"=%12.4e\n" % (sig3coef[k])
if chatter > 3 : print ("DEBUG 1142 done spectral extraction, now calibrate")
offset = ank_c[0]-slit_width/2
msg += "best fit 1st order anchor offset from spectrum = %7.1f\n"%(offset)
msg += "anchor position in rotated extracted spectrum (%6.1f,%6.1f)\n"%(ank_c[1],y1[int(ank_c[1])])
msg += msg4
Yout.update({"offset":offset})
#2012-02-20 moved updateFitorder to curved_extraction
#if curved == "update":
# fit = fitorder2
#else:
# fit = fitorder
fit = fitorder
if optimal_extraction:
# development dropped, since mod8 causes slit width oscillations
# also requires a good second order flux and coi calibration for
# possible further development of order splitting.
# result in not consistent now.
print("Starting optimal extraction: This can take a few minutes ......\n\t "\
"........\n\t\t .............")
Y3 = get_initspectrum(net,var,fit,160,ankerimg,C_1=C_1,C_2=C_2,dist12=dist12,
predict2nd=predict2nd,
chatter=1)
counts, variance, borderup, borderdown, (fractions,cnts,vars,newsigmas) = Y3
# need to test that C_2 is valid here
if predict2nd:
Y4 = predict_second_order(dis,(sp_first-bg_first), C_1,C_2, dist12, quality,dlim1L, dlim1U,wheelpos)
wav2p, dis2p, flux2p, qual2p, dist12p = Y4[0]
# retrieve the effective area
Y7 = readFluxCalFile(wheelpos,anchor=anker,spectralorder=1,arf=fluxcalfile,msg=msg,chatter=chatter)
EffArea1 = Y7[:-1]
msg = Y7[-1]
Y7 = readFluxCalFile(wheelpos,anchor=anker,spectralorder=2,arf=None,msg=msg,chatter=chatter)
if type(Y7) == tuple:
EffArea2 = Y7[:-1]
else:
if type(Y7) != typeNone: msg = Y7
EffArea2 = None
# note that the output differs depending on parameters given, i.e., arf, anchor
Yout.update({"effarea1":EffArea1,"effarea2":EffArea2})
if interactive:
import matplotlib.pyplot as plt
if (plot_img) & (sumimage == None):
#plt.winter()
# make plot of model on image [figure 1]
#xa = np.where( (dis < 1400) & (dis > -300) )
bga = bg.copy()
fig1 = plt.figure(1); plt.clf()
img[img <=0 ] = 1e-16
plt.imshow(np.log(img),vmin=np.log(bga.mean()*0.1),vmax=np.log(bga.mean()*4))
levs = np.array([5,15,30,60,120,360]) * bg.mean()
if highlight: plt.contour(img,levels=levs)
# plot yellow wavelength marker
# TBD : add curvature
plt.plot(xpnt,ypnt,'+k',markersize=14)
if not skip_field_src:
plot_ellipsoid_regions(Xim,Yim,
Xa,Yb,Thet,b2mag,matched,ondetector,
pivot_ori,pivot_ori,dims,17.,)
if zoom:
#plt.xlim(np.max(np.array([0.,0.])),np.min(np.array([hdr['NAXIS1'],ankerimg[0]+400])))
#plt.ylim(np.max(np.array([0.,ankerimg[1]-400 ])), hdr['NAXIS2'])
plt.xlim(0,2000)
plt.ylim(0,2000)
else:
plt.xlim(0,2000)
plt.ylim(0,2000)
plt.savefig(indir+'/'+obsid+'_map.png',dpi=150)
#plt.show()
plt.close()
if (plot_raw):
#plt.winter()
nsubplots = 2
#if not fit_second: nsubplots=3
# make plot of spectrum [figure 2]
fig2 = plt.figure(2); plt.clf()
plt.subplots_adjust(top=1,hspace=0, wspace=0)
# image slice
ax21 = plt.subplot(nsubplots,1,1)
ac = -ank_c[1]
net[net<=0.] = 1e-16
#plt.imshow(np.log10(net),vmin=-0.8,vmax=0.8, #~FIXME:
# extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
# origin='lower',cmap=plt.cm.winter)
plt.imshow(np.log10(net),vmin=-10,vmax=2,
extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
origin='lower')#,cmap=plt.cm.winter)
#plt.imshow(extimg,vmin=0,vmax=50,
# extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
# origin='lower')#,cmap=plt.cm.winter)
if highlight:
plt.contour(np.log10(net),levels=[1,1.3,1.7,2.0,3.0],
extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
origin='lower')
#plt.imshow( extimg,vmin= (bg1.mean())*0.1,vmax= (bg1.mean()+bg1.std())*2, extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]) )
#levels = np.array([5,10,20,40,70,90.])
#levels = spnet[ank_c[2]:ank_c[3]].max() * levels * 0.01
#if highlight: plt.contour(net,levels=levels,extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]))
# cross_section_plot:
cp2 = cp2/np.max(cp2)*100
#plt.plot(ac+cp2+ank_c[1],np.arange(len(cp2)),'k',lw=2,alpha=0.6,ds='steps') #~TODO:
# plot zeroth orders
if not skip_field_src:
pivot= np.array([ank_c[1],ank_c[0]-offset])
#pivot_ori=ankerimg
mlim = 17.
if wheelpos > 500: mlim = 15.5
plot_ellipsoid_regions(Xim,Yim,Xa,Yb,Thet,b2mag,
matched,ondetector,
pivot,pivot_ori,
dims2,mlim,
img_angle=angle-180.0,ax=ax21)
# plot line on anchor location
#plt.plot([ac+ank_c[1],ac+ank_c[1]],[0,slit_width],'k',lw=2)
plt.plot(0,ank_c[0],'kx',markersize=5) #~TODO:
# plot position centre of orders
#if present0: plt.plot(ac+q0[0],y0[q0[0]],'k--',lw=1.2)
#plt.plot( ac+q1[0],y1[q1[0]],'k--',lw=1.2)
#if present2: plt.plot(ac+q2[0],y2[q2[0]],'k--',alpha=0.6,lw=1.2)
#if present3: plt.plot(ac+q3[0],y3[q3[0]],'k--',alpha=0.3,lw=1.2)
# plot borders slit region
if present0:
plt.plot(ac+q0[0],borderup [0,q0[0]],'r-')
plt.plot(ac+q0[0],borderdown[0,q0[0]],'r-')
if present1:
plt.plot(ac+q1[0],borderup [1,q1[0]],'r-',lw=1.2)
plt.plot(ac+q1[0],borderdown[1,q1[0]],'r-',lw=1.2)
if present2:
plt.plot(ac+q2[0],borderup [2,q2[0]],'r-',alpha=0.6,lw=1)
plt.plot(ac+q2[0],borderdown[2,q2[0]],'r-',alpha=0.6,lw=1)
if present3:
plt.plot(ac+q3[0],borderup [3,q3[0]],'r-',alpha=0.3,lw=1.2)
plt.plot(ac+q3[0],borderdown[3,q3[0]],'r-',alpha=0.3,lw=1.2)
# plot limits background
plt_bg = np.ones(len(q1[0]))
if (background_lower[0] == None) & (background_upper[0] == None):
background_lower = [0,50] ; background_upper = [slit_width-50,slit_width]
plt.plot(ac+q1[0],plt_bg*(background_lower[1]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(background_upper[0]),'-k',lw=1.5 )
else:
if background_lower[0] != None:
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]-background_lower[0]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]-background_lower[1]),'-k',lw=1.5 )
elif background_lower[1] != None:
plt.plot(ac+q1[0],plt_bg*(background_lower[1]),'-k',lw=1.5 )
if background_upper[1] != None:
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]+background_upper[0]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]+background_upper[1]),'-k',lw=1.5 )
elif background_upper[0] != None:
plt.plot(ac+q1[0],plt_bg*(background_upper[0]),'-k',lw=1.5 )
# rescale, title
plt.ylim(0,slit_width)
#plt.ylim(50,150)
if not zoom:
xlim1 = ac+ank_c[2]
xlim2 = ac+ank_c[3]
else:
xlim1 = max(ac+ank_c[2], -420)
xlim2 = min(ac+ank_c[3],1400)
plt.xlim(xlim1,xlim2)
plt.title(obsid+'+'+str(ext))
# first order raw data plot
ax22 = plt.subplot(nsubplots,1,2)
plt.rcParams['legend.fontsize'] = 'small'
if curved == 'straight':
p1, = plt.plot( dis[ank_c[2]:ank_c[3]], spnet[ank_c[2]:ank_c[3]],'k',
ds='steps',lw=0.5,alpha=0.5,label='straight')
p2, = plt.plot( dis[ank_c[2]:ank_c[3]],
spextwidth*(bg1[ank_c[2]:ank_c[3]]+bg2[ank_c[2]:ank_c[3]])*0.5,
'b',alpha=0.5,label='background')
plt.legend([p1,p2],['straight','background'],loc=0,)
if curved != "straight":
p3, = plt.plot(x[q1[0]],(sp_first-bg_first)[q1[0]],'r',ds='steps',label='spectrum')
plt.plot(x[q1[0]],(sp_first-bg_first)[q1[0]],'k',alpha=0.2,ds='steps',label='_nolegend_')
p7, = plt.plot(x[q1[0]], bg_first[q1[0]],'y',alpha=0.5,lw=1.1,ds='steps',label='background')
# bad pixels:
qbad = np.where(quality[q1[0]] > 0)
p4, = plt.plot(x[qbad],(sp_first-bg_first)[qbad],'xk',markersize=4)
#p7, = plt.plot(x[q1[0]],(bg_first)[q1[0]],'r-',alpha=0.3,label='curve_bkg')
# annotation
#plt.legend([p3,p4,p7],['spectrum','suspect','background'],loc=0,)
plt.legend([p3,p7],['spectrum','background'],loc=0,)
maxbg = np.max(bg_first[q1[0]][np.isfinite(bg_first[q1[0]])])
topcnt = 1.2 * np.max([np.max(spnet[q1[0]]),maxbg, np.max((sp_first-bg_first)[q1[0]])])
plt.ylim(np.max([ -20, np.min((sp_first-bg_first)[q1[0]])]), np.min([topcnt, maxcounts]))
if optimal_extraction:
p5, = plt.plot(x[q1[0]],counts[1,q1[0]],'g',alpha=0.5,ds='steps',lw=1.2,label='optimal' )
p6, = plt.plot(x[q1[0]],counts[1,q1[0]],'k',alpha=0.5,ds='steps',lw=1.2,label='_nolegend_' )
p7, = plt.plot(x[q1[0]], bg_first[q1[0]],'y',alpha=0.7,lw=1.1,ds='steps',label='background')
plt.legend([p3,p5,p7],['spectrum','optimal','background'],loc=0,)
topcnt = 1.2 * np.max((sp_first-bg_first)[q1[0]])
ylim1,ylim2 = -10, np.min([topcnt, maxcounts])
plt.ylim( ylim1, ylim2 )
#plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel('1st order counts')
'''
# plot second order
ax23 = plt.subplot(nsubplots,1,3)
plt.rcParams['legend.fontsize'] = 'small'
#plt.xlim(ank_c[2],ank_c[3])
if fit_second:
if curved != 'straight':
p1, = plt.plot(x[q2[0]],(sp_second-bg_second)[q2[0]],'r',label='spectrum')
plt.plot(x[q2[0]],(sp_second-bg_second)[q2[0]],'k',alpha=0.2,label='_nolegend_')
p7, = plt.plot(x[q2[0]],(bg_second)[q2[0]],'y',alpha=0.7,lw=1.1,label='background')
qbad = np.where(quality[q2[0]] > 0)
p2, = plt.plot(x[qbad],(sp_second-bg_second)[qbad],'+k',alpha=0.3,label='suspect')
plt.legend((p1,p7,p2),('spectrum','background','suspect'),loc=2)
plt.ylim(np.max([ -100, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
if optimal_extraction:
p3, = plt.plot(x[q2[0]],counts[2,q2[0]],'g',alpha=0.5,ds='steps',label='optimal' )
plt.legend((p1,p7,p2,p3),('spectrum','background','suspect','optimal',),loc=2)
#plt.ylim(np.max([ -10,np.min(counts[2,q2[0]]), np.min((sp_second-bg_second)[q2[0]])]),\
# np.min([np.max(counts[2,q2[0]]), np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
if predict2nd :
p4, = plt.plot(dis2p+dist12,flux2p, ds='steps',label='predicted')
p5, = plt.plot(dis2p[np.where(qual2p != 0)]+dist12,flux2p[np.where(qual2p != 0)],'+k',label='suspect',markersize=4)
if optimal_extraction & fit_second:
plt.legend((p1,p2,p3,p4,p5),('curved','suspect','optimal','predicted','suspect'),loc=2)
#plt.ylim(np.max([ -100,np.min(counts[2,q2[0]]), np.min((sp_second-bg_second)[q2[0]])]),\
# np.min([np.max(counts[2,q2[0]]), np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
elif optimal_extraction:
plt.legend((p1,p7,p4,p5),('curved','background','predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
elif fit_second:
plt.legend((p1,p2,p4,p5),('curved','suspect','predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
else:
plt.legend((p4,p5),('predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel('2nd order counts')
'''
'''
if fit_second:
ax24 = plt.subplot(nsubplots,1,4)
plt.rcParams['legend.fontsize'] = 'small'
if (len(q3[0]) > 1) & (curved != "xxx"):
p1, = plt.plot(x[q3[0]],(sp_third-bg_third)[q3[0]],'r',label='spectrum')
plt.plot(x[q3[0]],(sp_third-bg_third)[q3[0]],'k',alpha=0.2,label='_nolegend_')
qbad = np.where(quality[q3[0]] > 0)
p2, = plt.plot(x[qbad],(sp_third-bg_third)[qbad],'xk',alpha=0.3,label='suspect')
p3, = plt.plot(x[q3[0]],bg_third[q3[0]],'y',label='background')
plt.legend([p1,p3,p2],['spectrum','background','suspect'],loc=2)
plt.ylim(np.max([ -100, np.min((sp_second-bg_second)[q3[0]])]),\
np.min([np.max((sp_third-bg_third)[q3[0]]), maxcounts]))
if optimal_extraction:
p4, = plt.plot(x[q3[0]],counts[3,q3[0]],'b',alpha=0.5,ds='steps',label='optimal' )
plt.legend([p1,p3,p2,p4],['spectrum','background','suspect','optimal',],loc=2)
#plt.ylim(np.max([ -100,np.min(counts[3,q3[0]]), np.min((sp_second-bg_second)[q3[0]])]),\
# np.min([np.max(counts[3,q3[0]]), np.max((sp_third-bg_third)[q3[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
#plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel(u'3rd order counts')
plt.xlabel(u'pixel distance from anchor position')
'''
plt.savefig(indir+'/'+obsid+'_count.png',dpi=150)
#plt.show()
if (plot_spec):
#plt.winter()
# NEED the flux cal applied!
nsubplots = 1
if not fit_second:
nsubplots = 1
fig3 = plt.figure(3)
plt.clf()
wav1 = polyval(C_1,x[q1[0]])
ax31 = plt.subplot(nsubplots,1,1)
if curved != "xxx":
# PSF aperture correction applies on net rate, but background
# needs to be corrected to default trackwidth linearly
rate1 = ((sp_first[q1[0]]-bg_first[q1[0]] ) * apercorr[1,[q1[0]]]
/expospec[1,[q1[0]]]).flatten()
bkgrate1 = ((bg_first)[q1[0]] * (2.5/trackwidth)
/expospec[1,[q1[0]]]).flatten()
print("computing flux for plot; frametime =",framtime)
flux1,wav1,coi_valid1 = rate2flux(wav1,rate1, wheelpos,
bkgrate=bkgrate1,
co_sprate = (co_first[q1[0]]/expospec[1,[q1[0]]]).flatten(),
co_bgrate = (co_back [q1[0]]/expospec[1,[q1[0]]]).flatten(),
pixno=x[q1[0]],
#sig1coef=sig1coef, sigma1_limits=[2.6,4.0],
arf1=fluxcalfile, arf2=None, effarea1=EffArea1,
spectralorder=1, swifttime=tstart,
#trackwidth = trackwidth,
anker=anker,
#option=1, fudgespec=1.32,
frametime=framtime,
debug=False,chatter=1)
#flux1_err = 0.5*(rate2flux(,,rate+err,,) - rate2flux(,,rate-err,,))
p1, = plt.plot(wav1[np.isfinite(flux1)],flux1[np.isfinite(flux1)],
color='darkred',label=u'curved')
p11, = plt.plot(wav1[np.isfinite(flux1)&(coi_valid1==False)],
flux1[np.isfinite(flux1)&(coi_valid1==False)],'.',
color='lawngreen',
label="too bright")
# PROBLEM quality flags !!!
qbad1 = np.where((quality[np.array(x[q1[0]],dtype=int)] > 0) & (quality[np.array(x[q1[0]],dtype=int)] < 16))
qbad2 = np.where((quality[np.array(x[q1[0]],dtype=int)] > 0) & (quality[np.array(x[q1[0]],dtype=int)] == qflag.get("bad")))
plt.legend([p1,p11],[u'calibrated spectrum',u'too bright - not calibrated'])
if len(qbad2[0]) > 0:
p2, = plt.plot(wav1[qbad2],flux1[qbad2],
'+k',markersize=4,label=u'bad data')
plt.legend([p1,p2],[u'curved',u'bad data'])
plt.ylabel(u'1st order flux $(erg\ cm^{-2} s^{-1} \AA^{-1})$')
# find reasonable limits flux
get_flux_limit = flux1[int(len(wav1)*0.3):int(len(wav1)*0.7)]
get_flux_limit[get_flux_limit==np.inf] = np.nan
get_flux_limit[get_flux_limit==-np.inf]= np.nan
qf = np.nanmax(get_flux_limit)
if qf > 2e-12:
qf = 2e-12
plt.ylim(0.001*qf,1.2*qf)
plt.xlim(1600,6000)
if optimal_extraction: # no longer supported (2013-04-24)
print("OPTIMAL EXTRACTION IS NO LONGER SUPPORTED")
wav1 = np.polyval(C_1,x[q1[0]])
#flux1 = rate2flux(wav1, counts[1,q1[0]]/expo, wheelpos, spectralorder=1, arf1=fluxcalfile)
flux1,wav1,coi_valid1 = rate2flux(wav1,counts[1,q1[0]]/expo, wheelpos, bkgrate=bkgrate1,
co_sprate = (co_first[q1[0]]/expospec[1,[q1[0]]]).flatten(),
co_bgrate = (co_back [q1[0]]/expospec[1,[q1[0]]]).flatten(),
pixno=x[q1[0]], #sig1coef=sig1coef, sigma1_limits=[2.6,4.0],
arf1=fluxcalfile, arf2=None, spectralorder=1, swifttime=tstart,
#trackwidth = trackwidth,
anker=anker, #option=1, fudgespec=1.32,
frametime=framtime,
debug=False,chatter=1)
p3, = plt.plot(wav1, flux1,'g',alpha=0.5,ds='steps',lw=2,label='optimal' )
p4, = plt.plot(wav1,flux1,'k',alpha=0.5,ds='steps',lw=2,label='_nolegend_' )
#plt.legend([p1,p2,p3],['curved','suspect','optimal'],loc=0,)
plt.legend([p1,p3],['curved','optimal'],loc=0,)
qf = (flux1 > 0.) & (flux1 < 1.0e-11)
plt.ylim( -0.01*np.max(flux1[qf]), 1.2*np.max(flux1[qf]) )
plt.ylabel(u'1st order count rate')
plt.xlim(np.min(wav1)-10,np.max(wav1))
plt.title(obsid+'+'+str(ext))
'''
if fit_second:
ax32 = plt.subplot(nsubplots,1,2)
plt.plot([1650,3200],[0,1])
plt.text(2000,0.4,'NO SECOND ORDER DATA',fontsize=16)
if curved != 'xxx':
wav2 = polyval(C_2,x[q2[0]]-dist12)
rate2 = ((sp_second[q2[0]]-bg_second[q2[0]])*
apercorr[2,[q2[0]]].flatten()/expospec[2,[q2[0]]].flatten() )
bkgrate2 = ((bg_second)[q2[0]] * (2.5/trackwidth)
/expospec[2,[q2[0]]]).flatten()
flux2,wav2,coi_valid2 = rate2flux(wav2, rate2, wheelpos,
bkgrate=bkgrate2,
co_sprate = (co_second[q2[0]]/expospec[2,[q2[0]]]).flatten(),
co_bgrate = (co_back [q2[0]]/expospec[2,[q2[0]]]).flatten(),
pixno=x[q2[0]],
arf1=fluxcalfile, arf2=None,
frametime=framtime, effarea2=EffArea2,
spectralorder=2,swifttime=tstart,
anker=anker2,
debug=False,chatter=1)
#flux1_err = rate2flux(wave,rate_err, wheelpos, spectralorder=1,)
plt.cla()
print('#############################')
print(wav2[100],flux2[100],wav2,flux2)
p1, = plt.plot(wav2,flux2,'r',label='curved')
plt.plot(wav2,flux2,'k',alpha=0.2,label='_nolegend_')
qbad1 = np.where((quality[np.array(x[q2[0]],dtype=int)] > 0) & (quality[np.array(x[q2[0]],dtype=int)] < 16))
p2, = plt.plot(wav2[qbad1],flux2[qbad1],'+k',markersize=4,label='suspect data')
plt.legend(['uncalibrated','suspect data'])
plt.ylabel(u'estimated 2nd order flux')
plt.xlim(1600,3200)
qf = (flux1 > 0.) & (flux1 < 1.0e-11)
if np.sum(qf[0]) > 0:
plt.ylim( -0.01*np.max(flux1[qf]), 1.2*np.max(flux1[qf]) )
#else: plt.ylim(1e-16,2e-12)
else: plt.ylim(1e-12,1e-11)
# final fix to limits of fig 3,1
y31a,y31b = ax31.get_ylim()
setylim = False
if y31a < 1e-16:
y31a = 1e-16
setylim = True
if y31b > 1e-12:
y31b = 1e-12
setylim = True
if setylim: ax31.set_ylim(bottom=y31a,top=y31b)
#
'''
plt.xlabel(u'$\lambda(\AA)$',fontsize=16)
plt.savefig(indir+'/'+obsid+'_flux.png',dpi=150)
# to plot the three figures
#plt.show()
# output parameter
Y1 = ( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
(C_1,C_2,img), hdr,m1,m2,aa,wav1 )
# output parameter
Y2 = fit, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,
bg_second,bg_third), (borderup,borderdown), apercorr, expospec
Yout.update({"Yfit":Yfit})
# writing output to a file
#try:
if wr_outfile: # write output file
if ((chatter > 0) & (not clobber)): print("trying to write output files")
import uvotio
if (curved == 'straight') & (not optimal_extraction):
ank_c2 = np.copy(ank_c) ; ank_c2[1] -= m1
F = uvotio.wr_spec(RA,DEC,filestub,ext,
hdr,anker,anker_field[0],anker_field[1],
dis[aa],wav1,
spnet[aa]/expo,bg[aa]/expo,
bg1[aa]/expo,bg2[aa]/expo,
offset,ank_c2,extimg, C_1,
history=None,chatter=1,
clobber=clobber,
calibration_mode=calmode,
interactive=interactive)
elif not optimal_extraction:
if fileversion == 2:
Y = Yout
elif fileversion == 1:
Y = (Y0,Y1,Y2,Y4)
F = uvotio.writeSpectrum(RA,DEC,filestub,ext, Y,
fileoutstub=outfile,
arf1=fluxcalfile, arf2=None,
fit_second=fit_second,
write_rmffile=write_RMF, fileversion=1,
used_lenticular=use_lenticular_image,
history=msg,
calibration_mode=calmode,
chatter=chatter,
clobber=clobber )
elif optimal_extraction:
Y = (Y0,Y1,Y2,Y3,Y4)
F = uvotio.OldwriteSpectrum(RA,DEC,filestub,ext, Y, mode=2,
quality=quality, interactive=False,fileout=outfile,
updateRMF=write_rmffile, \
history=msg, chatter=5, clobber=clobber)
#except (RuntimeError, IOError, ValueError):
# print "ERROR writing output files. Try to call uvotio.wr_spec."
# pass
# clean up fake file
if tempntags.__contains__('fakefilestub'):
filestub = tempnames[tempntags.index('fakefilestub')]
os.system('rm '+indir+filestub+'ufk_??.img ')
# update Figure 3 to use the flux...
# TBD
# write the summary
sys.stdout.write(msg)
sys.stdout.write(msg2)
flog = open(logfile,'a')
flog.write(msg)
flog.write(msg2)
flog.close()
#plt.show()
if give_result: return Y0, Y1, Y2, Y3, Y4
if give_new_result: return Yout
def extractSpecImg(file,ext,anker,angle,anker0=None,anker2=None, anker3=None,\
searchwidth=35,spwid=13,offsetlimit=None, fixoffset=None,
background_lower=[None,None], background_upper=[None,None],
template=None, x_offset = False, ank_c_0offset=False, replace=None,
clobber=True,chatter=2,singleside_bkg=False):
'''
extract the grism image of spectral orders plus background
using the reference point at 2600A in first order.
Parameters
----------
file : str
input file location
ext : int
extension of image
anker : list, ndarray
X,Y coordinates of the 2600A (1) point on the image in image coordinates
angle : float
angle of the spectrum at 2600A in first order from zemax e.g., 28.8
searchwidth : float
find spectrum with this possible offset ( in crowded fields
it should be set to a smaller value)
template : dictionary
template for the background.
use_rectext : bool
If True then the HEADAS uvotimgrism program rectext is used to extract the image
This is a better way than using ndimage.rotate() which does some weird smoothing.
offsetlimit : None, float/int, list
if None, search for y-offset predicted anchor to spectrum using searchwidth
if float/int number, search for offset only up to a distance as given from y=100
if list, two elements, no more. [y-value, delta-y] for search of offset.
if delta-y < 1, fixoffset = y-value.
History
-------
2011-09-05 NPMK changed interpolation in rotate to linear, added a mask image to
make sure to keep track of the new pixel area.
2011-09-08 NPMK incorporated rectext as new extraction and removed interactive plot,
curved, and optimize, which are now elsewhere.
2014-02-28 Add template for the background as an option
2014-08-04 add option to provide a 2-element list for the offsetlimit to constrain
the offset search range.
'''
import numpy as np
import os, sys
try:
from astropy.io import fits as pyfits
except:
import pyfits
import scipy.ndimage as ndimage
#out_of_img_val = -1.0123456789 now a global
Tmpl = (template != None)
if Tmpl:
if template['sumimg']:
raise IOError("extractSpecImg should not be called when there is sumimage input")
if chatter > 4:
print('extractSpecImg parameters: file, ext, anker, angle')
print(file,ext)
print(anker,angle)
print('searchwidth,chatter,spwid,offsetlimit, :')
print(searchwidth,chatter,spwid,offsetlimit)
img, hdr = pyfits.getdata(file,ext,header=True)
if isinstance(replace,np.ndarray):
img = replace
# wcs_ = wcs.WCS(header=hdr,) # detector coordinates DETX,DETY in mm
# wcsS = wcs.WCS(header=hdr,key='S',relax=True,) # TAN-SIP coordinate type
if Tmpl:
if (img.shape != template['template'].shape) :
print("ERROR")
print("img.shape=", img.shape)
print("background_template.shape=",template['template'].shape)
raise IOError("The templare array does not match the image")
wheelpos = hdr['WHEELPOS']
if chatter > 4: print('wheelpos:', wheelpos)
if not use_rectext:
# now we want to extend the image array and place the anchor at the centre
s1 = 0.5*img.shape[0]
s2 = 0.5*img.shape[1]
d1 = -(s1 - anker[1]) # distance of anker to centre img
d2 = -(s2 - anker[0])
n1 = 2.*abs(d1) + img.shape[0] + 400 # extend img with 2.x the distance of anchor
n2 = 2.*abs(d2) + img.shape[1] + 400
#return img, hdr, s1, s2, d1, d2, n1, n2
if 2*int(n1/2) == int(n1): n1 = n1 + 1
if 2*int(n2/2) == int(n2): n2 = n2 + 1
c1 = n1 / 2 - anker[1]
c2 = n2 / 2 - anker[0]
n1 = int(n1)
n2 = int(n2)
c1 = int(c1)
c2 = int(c2)
if chatter > 3: print('array info : ',img.shape,d1,d2,n1,n2,c1,c2)
# the ankor is now centered in array a; initialize a with out_of_img_val
a = np.zeros( (n1,n2), dtype=float) + cval
if Tmpl : a_ = np.zeros( (n1,n2), dtype=float) + cval
# load array in middle
a[c1:c1+img.shape[0],c2:c2+img.shape[1]] = img
if Tmpl: a_[c1:c1+img.shape[0],c2:c2+img.shape[1]] = template['template']
# patch outer regions with something like mean to get rid of artifacts
mask = abs(a - cval) < 1.e-8
# Kludge:
# test image for bad data and make a fix by putting the image average in its place
dropouts = False
aanan = np.isnan(a) # process further for flagging
aagood = np.isfinite(a)
aaave = a[np.where(aagood)].mean()
a[np.where(aanan)] = aaave
if len( np.where(aanan)[0]) > 0 :
dropouts = True
print("extractSpecImg WARNING: BAD IMAGE DATA fixed by setting to mean of good data whole image ")
# now we want to rotate the array to have the dispersion in the x-direction
if angle < 40. :
theta = 180.0 - angle
else: theta = angle
if not use_rectext:
b = ndimage.rotate(a,theta,reshape = False,order = 1,mode = 'constant',cval = cval)
if Tmpl:
b_ = ndimage.rotate(a_,theta,reshape = False,order = 1,mode = 'constant',cval = cval)
if dropouts: #try to rotate the boolean image
aanan = ndimage.rotate(aanan,theta,reshape = False,order = 1,mode = 'constant',)
e2 = int(0.5*b.shape[0])
c = b[e2-int(slit_width/2):e2+int(slit_width/2),:]
if Tmpl: c_ = b_[e2-int(slit_width/2):e2+int(slit_width/2),:]
if dropouts: aanan = aanan[e2-int(slit_width/2):e2+int(slit_width/2),:]
ank_c = [ (c.shape[0]-1)/2+1, (c.shape[1]-1)/2+1 , 0, c.shape[1]] #~TODO:
if x_offset == False:
pass
else:
ank_c[1] += x_offset
if use_rectext:
# history: rectext is a fortran code that maintains proper density of quantity when
# performing a rotation.
# build the command for extracting the image with rectext
outfile= tempnames[tempntags.index('rectext')]
cosangle = np.cos(theta/180.*np.pi)
sinangle = np.sin(theta/180.*np.pi)
# distance anchor to pivot
dx_ank = - (hdr['naxis1']-anker[0])/cosangle + slit_width/2*sinangle #~FIXME: I am not sure if this is "+ 100.*sinangle" or "+ slit_width/2*sinangle"
if np.abs(dx_ank) > 760: dx_ank = 760 # include zeroth order (375 for just first order)
# distance to end spectrum
dx_2 = -anker[0] /cosangle + slit_width/2/sinangle # to lhs edge #~FIXME: I am not sure if this is "+ 100.*sinangle" or "+ slit_width/2*sinangle"
dy_2 = (hdr['naxis2']-anker[1])/sinangle - slit_width/2/cosangle # to top edge #~FIXME: I am not sure if this is "+ 100.*sinangle" or "+ slit_width/2*sinangle"
dx = int(dx_ank + np.array([dx_2,dy_2]).min() ) # length rotated spectrum
dy = slit_width # width rotated spectrum
# pivot x0,y0
x0 = anker[0] - dx_ank*cosangle + dy/2.*sinangle
y0 = anker[1] - dx_ank*sinangle - dy/2.*cosangle
command= "rectext infile="+file+"+"+str(ext)
command+=" outfile="+outfile
command+=" angle="+str(theta)+" width="+str(dx)
command+=" height="+str(dy)+" x0="+str(x0)+" y0="+str(y0)
command+=" null="+str(cval)
command+=" chatter=5 clobber=yes"
print(command)
os.system(command)
c = extimg = pyfits.getdata(outfile,0)
ank_c = np.array([int(slit_width/2),dx_ank,0,extimg.shape[1]])
# out_of_img_val = 0.
if clobber:
os.system("rm "+outfile)
if Tmpl:
raise("background_template cannot be used with use_rectext option")
# version 2016-01-16 revision:
# the background can be extracted via a method from the strip image
#
# extract the strips with the background on both sides, and the spectral orders
# find optimised place of the spectrum
# first find parts not off the detector -> 'qofd'
eps1 = 1e-15 # remainder after resampling for intel-MAC OSX system (could be jacked up)
qofd = np.where( abs(c[int(slit_width/2),:] - cval) > eps1 )
# define constants for the spectrum in each mode
if wheelpos < 300: # UV grism
disrange = 150 # perhaps make parameter in call?
disscale = 10 # ditto
minrange = disrange/10 # 300 is maximum
maxrange = np.array([disrange*disscale,c.shape[1]-ank_c[1]-2]).min() # 1200 is most of the spectrum
else: # V grism
disrange = 120 # perhaps make parameter in call?
disscale = 5 # ditto
minrange = np.array([disrange/2,ank_c[1]-qofd[0].min() ]).max() # 300 is maximum
maxrange = np.array([disrange*disscale, c.shape[1]-ank_c[1]-2, qofd[0].max()-ank_c[1]]).min() # 600 is most of the spectrum
if chatter > 1:
#print 'image was rotated; anchor in extracted image is ', ank_c[:2]
#print 'limits spectrum are ',ank_c[2:]
print('finding location spectrum from a slice around anchor x-sized:',minrange,':',maxrange)
print('offsetlimit = ', offsetlimit)
d = (c[:,int(ank_c[1]-minrange):int(ank_c[1]+maxrange)]).sum(axis=1).squeeze()
if len(qofd[0]) > 0:
ank_c[2] = min(qofd[0])
ank_c[3] = max(qofd[0])
else:
ank_c[2] = -1
ank_c[3] = -1
# y-position of anchor spectrum in strip image (allowed y (= [50,150], but search only in
# range defined by searchwidth (default=35) )
y_default=int(slit_width/2) # reference y
if (type(offsetlimit) == list):
if (len(offsetlimit)==2):
# sane y_default
if (offsetlimit[0] > 50) & (offsetlimit[0] < 150):
y_default=int(offsetlimit[0]+0.5) # round to nearest pixel
else:
raise IOError("parameter offsetlimit[0]=%i, must be in range [51,149]."+
"\nIs the aspect correction right (in reference images)?"%(offsetlimit[0]))
if offsetlimit[1] < 1:
fixoffset = offsetlimit[0]-int(slit_width/2)
else:
searchwidth=int(offsetlimit[1]+0.5)
if fixoffset == None:
offset = ( (np.where(d == (d[y_default-searchwidth:y_default+searchwidth]).max() ) )[0] - y_default )
if chatter>0: print('offset found from y=%i is %i '%(y_default ,-offset))
if len(offset) == 0:
print('offset problem: offset set to zero')
offset = 0
offset = offset[0]
if (type(offsetlimit) != list):
if (offsetlimit != None):
if abs(offset) >= offsetlimit:
offset = 0
print('This is larger than the offsetlimit. The offset has been set to 0')
if interactive:
offset = float(input('Please give a value for the offset: '))
else:
offset = fixoffset
if ank_c_0offset == True:
offset = 0
if chatter > 0:
print('offset used is : ', -offset)
if (type(offsetlimit) == list) & (fixoffset == None):
ank_c[0] = offsetlimit[0]-offset
else:
ank_c[0] += offset
print('image was rotated; anchor in extracted image is [', ank_c[0],',',ank_c[1],']')
print('limits spectrum on image in dispersion direction are ',ank_c[2],' - ',ank_c[3])
# Straight slit extraction (most basic extraction, no curvature):
sphalfwid = int(spwid-0.5)/2
splim1 = int(slit_width/2)+offset-sphalfwid+1
splim2 = splim1 + spwid
spimg = c[int(splim1):int(splim2),:]
if chatter > 0:
print('Extraction limits across dispersion: splim1,splim2 = ',splim1,' - ',splim2)
bg, bg1, bg2, bgsigma, bgimg, bg_limits, bgextras = findBackground(c,
background_lower=background_lower, background_upper=background_upper,yloc_spectrum=ank_c[0] )
if singleside_bkg == 'bg1':
bg2 = bg1
elif singleside_bkg == 'bg2':
bg1 = bg2
else:
pass
bgmean = bg
bg = 0.5*(bg1+bg2)
if chatter > 0: print('Background : %10.2f +/- %10.2f (1-sigma error)'%( bgmean,bgsigma))
# define the dispersion with origin at the projected position of the
# 2600 point in first order
dis = np.arange((c.shape[1]),dtype=np.int16) - ank_c[1]
# remove the background
#bgimg_ = 0.* spimg.copy()
#for i in range(bgimg_.shape[0]): bgimg_[i,:]=bg
spnetimg = spimg - bg
spnet = spnetimg.sum(axis=0)
result = {"dis":dis,"spnet":spnet,"bg":bg,"bg1":bg1,
"bg2":bg2,"bgsigma":bgsigma,"bgimg":bgimg,
"bg_limits_used":bg_limits,"bgextras":bgextras,
"extimg":c,"spimg":spimg,"spnetimg":spnetimg,
"offset":offset,"ank_c":ank_c,'dropouts':dropouts}
if dropouts: result.update({"dropout_mask":aanan})
if Tmpl: result.update({"template_extimg":c_})
return result
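# Illustrative sketch (not part of the original module): a minimal call to
# extractSpecImg. The file name, anchor position and angle below are placeholder
# values; real ones come from the anchor/wavelength calibration done in the main
# extraction routine, and the module-level settings (use_rectext, slit_width,
# cval, ...) are assumed to be configured as usual.
def _example_extractSpecImg():
    anker = [1100.5, 1020.3]                    # hypothetical 2600A anchor (image coords)
    angle = 35.2                                # hypothetical dispersion angle
    out = extractSpecImg('sw00032911033ugu_dt.img', 1, anker, angle,
                         searchwidth=35, spwid=13,
                         offsetlimit=[100, 3],  # search +/- 3 pix around y=100
                         chatter=1)
    return out['dis'], out['spnet'], out['ank_c']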
def sigclip1d_mask(array1d, sigma, badval=None, conv=1e-5, maxloop=30):
"""
sigma clip array around mean, using number of sigmas 'sigma'
after masking the badval given, requiring finite numbers, and
either finish when converged or maxloop is reached.
return good mask
"""
import numpy as np
y = np.asarray(array1d)
if badval != None:
valid = (np.abs(y - badval) > 1e-6) & np.isfinite(y)
else:
valid = np.isfinite(y)
yv = y[valid]
mask = yv < (yv.mean() + sigma * yv.std())
ym_ = yv.mean()
ymean = yv[mask].mean()
yv = yv[mask]
while (np.abs(ym_-ymean) > conv*np.abs(ymean)) & (maxloop > 0):
ym_ = ymean
mask = ( yv < (yv.mean() + sigma * yv.std()) )
yv = yv[mask]
ymean = yv.mean()
maxloop -= 1
valid[valid] = y[valid] < ymean + sigma*yv.std()
return valid
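# Illustrative sketch (not part of the original module): keep only the points
# that survive the sigma clipping; the strong outlier and the flagged bad value
# are excluded from the returned mask.
def _example_sigclip1d_mask():
    import numpy as np
    y = np.array([9.8, 10.1, 9.9, 10.3, 10.0, 9.7, 10.2, 10.4, 9.9, 10.1,
                  500.0,    # strong outlier
                  -99.0])   # flagged bad value
    good = sigclip1d_mask(y, sigma=2.5, badval=-99.0)
    return y[good]          # only the background-like values around 10 remain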
def background_profile(img, smo1=30, badval=None):
"""
helper routine to determine for the rotated image
(spectrum in rows) the background using sigma clipping.
"""
import numpy as np
from scipy import interpolate
bgimg = img.copy()
nx = bgimg.shape[1] # number of points in direction of dispersion
ny = bgimg.shape[0] # width of the image
# look at the summed rows of the image
u_ysum = []
for i in range(ny):
u_ysum.append(bgimg[i,:].mean())
u_ysum = np.asarray(u_ysum)
u_ymask = sigclip1d_mask(u_ysum, 2.5, badval=badval, conv=1e-5, maxloop=30)
u_ymean = u_ysum[u_ymask].mean()
# look at the summed columns after filtering bad rows
u_yindex = np.where(u_ymask)[0]
u_xsum = []
u_std = []
for i in range(nx):
u_x1 = bgimg[u_yindex, i].squeeze()
# clip u_x1
u_x1mask = sigclip1d_mask(u_x1, 2.5, badval=None, conv=1e-5, maxloop=30)
u_xsum.append(u_x1[u_x1mask].mean())
u_std.append(u_x1[u_x1mask].std())
#print u_x1[u_x1mask]
#if np.isfinite(u_x1mask.mean()) & len(u_x1[u_x1mask])>0:
# print "%8.2f %8.2f %8.2f "%(u_x1[u_x1mask].mean(),u_x1[u_x1mask].std(),u_x1[u_x1mask].max())
# the best background estimate of the typical row is now u_xsum
# fit a smooth spline through the u_xsum values (or boxcar?)
#print "u_x means "
#print u_xsum
u_xsum = np.asarray(u_xsum)
u_std = np.asarray(u_std)
u_xsum_ok = np.isfinite(u_xsum)
bg_tcp = interpolate.splrep(np.arange(nx)[u_xsum_ok],
np.asarray(u_xsum)[u_xsum_ok], s=smo1)
# representative background profile in column
u_x = interpolate.splev(np.arange(nx), bg_tcp, )
return u_xsum, u_x, u_std
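# Illustrative sketch (not part of the original module): run background_profile
# on a synthetic strip image with a flat background and a few bright "spectrum"
# rows; the bright rows are clipped and the returned profile follows the background.
def _example_background_profile():
    import numpy as np
    np.random.seed(0)
    img = np.random.normal(5.0, 1.0, size=(100, 400))   # flat background strip
    img[48:53, :] += 50.0                                # fake bright spectrum rows
    u_xsum, u_x, u_std = background_profile(img, smo1=30, badval=None)
    return u_x    # smooth background profile along the dispersion direction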
def findBackground(extimg,background_lower=[None,None], background_upper=[None,None],yloc_spectrum=int(slit_width/2),
smo1=None, smo2=None, chatter=2):
'''Extract the background from the image slice containing the spectrum.
Parameters
----------
extimg : 2D array
image containing spectrum. Dispersion approximately along x-axis.
background_lower : list
distance in pixels from `yloc_spectrum` of the limits of the lower background region.
background_upper : list
distance in pixels from `yloc_spectrum` of the limits of the upper background region.
yloc_spectrum : int
pixel `Y` location of spectrum
smo1 : float
smoothing parameter passed to smoothing spline fitting routine. `None` for default.
smo2 : float
smoothing parameter passed to smoothing spline fitting routine. `None` for default.
chatter : int
verbosity
Returns
-------
bg : float
mean background
bg1, bg2 : 1D arrays
bg1 = lower background; bg2 = upper background
inherits size from extimg.shape x-coordinate
bgsig : float
standard deviation of background
bgimg : 2D array
image of the background constructed from bg1 and/or bg2
bg_limits_used : list, length 4
limits used for the background in the following order: lower background, upper background
(bg1_good, bg1_dis, bg1_dis_good, bg2_good, bg2_dis, bg2_dis_good, bgimg_lin) : tuple
various other background measures
Notes
-----
**Global parameter**
- **background_method** : {'boxcar','splinefit','sigmaclip'}
The two background images can be computed in three ways:
1. 'splinefit': sigma clip image, then fit a smoothing spline to each
row, then average in y for each background region
2. 'boxcar': select the background from the smoothed image created
by method 1 below.
3. 'sigmaclip': do sigma clipping on rows and columns to get column
profile background, then clip image and mask, interpolate over masked
bits.
extimg is the image containing the spectrum in the 1-axis centered in 0-axis
`ank` is the position of the anchor in the image
I create two background images:
1. split the image strip into 40 portions in x, so that the background variation is small
compute the mean
sigma clip (3 sigma) each area to the local mean
replace out-of-image pixels with mean of whole image (2-sigma clipped)
smooth with a boxcar by the smoothing factor
2. compute the background in two regions upper and lower
linearly interpolate in Y between the two regions to create a background image
bg1 = lower background; bg2 = upper background
smo1, smo2 allow one to relax the smoothing factor in computing the smoothing spline fit
History
-------
- 8 Nov 2011 NPM Kuin complete overhaul
things to do: get quality flagging of bad background points, edges perhaps done here?
- 13 Aug 2012: possible problem was seen of very bright sources not getting masked out properly
and causing an error in the background that extends over a large distance due to the smoothing.
The cause is that the sources are more extended than can be handled by this method.
A solution would be to derive a global background
- 30 Sep 2014: background fails in visible grism e.g., 57977004+1 nearby bright spectrum
new method added (4x slower processing) to screen the image using sigma clipping
'''
import sys
import numpy as np
try:
from convolve import boxcar
except:
from stsci.convolve import boxcar
from scipy import interpolate
import stsci.imagestats as imagestats
# initialize parameters
bgimg = extimg.copy()
out = np.where( (np.abs(bgimg-cval) <= 1e-6) )
in_img = np.where( (np.abs(bgimg-cval) > 1e-6) & np.isfinite(bgimg) )
nx = bgimg.shape[1] # number of points in direction of dispersion
ny = bgimg.shape[0] # width of the image
# sigma screening of background taking advantage of the dispersion being
# basically along the x-axis
if _PROFILE_BACKGROUND_:
bg, u_x, bg_sig = background_profile(bgimg, smo1=30, badval=cval)
u_mask = np.zeros((ny,nx),dtype=bool)
for i in range(ny):
u_mask[i,(bgimg[i,:].flatten() < u_x) &
np.isfinite(bgimg[i,:].flatten())] = True
bkg_sc = np.zeros((ny,nx),dtype=float)
# the following leaves larger disps in the dispersion but less noise;
# tested but not implemented, as it is not as fast and the mean results
# are comparable:
#for i in range(ny):
# uf = interpolate.interp1d(np.where(u_mask[i,:])[0],bgimg[i,u_mask[i,:]],bounds_error=False,fill_value=cval)
# bkg_sc[i,:] = uf(np.arange(nx))
#for i in range(nx):
# ucol = bkg_sc[:,i]
# if len(ucol[ucol != cval]) > 0:
# ucol[ucol == cval] = ucol[ucol != cval].mean()
for i in range(nx):
ucol = bgimg[:,i]
if len(ucol[u_mask[:,i]]) > 0:
ucol[np.where(u_mask[:,i] == False)[0] ] = ucol[u_mask[:,i]].mean()
bkg_sc[:,i] = ucol
if background_method == 'sigmaclip':
return bkg_sc
else:
# continue now with the with screened image
bgimg = bkg_sc
kx0 = 0 ; kx1 = nx # default limits for valid lower background
kx2 = 0 ; kx3 = nx # default limits for valid upper background
ny4 = int(0.25*ny) # default width of each default background region
sig1 = 1 # unit for background offset, width
bg_limits_used = [0,0,0,0] # return values used
## in the next section I replace the > 2.5 sigma peaks with the mean
## after subdividing the image strip to allow for the
## change in background level which can be > 2 over the
## image. Off-image parts are set to image mean.
# this works most times in the absence of the sigma screening,but
# can lead to overestimates of the background.
# the call to the imagestats package is only done here, and should
# consider replacement. Its not critical for the program.
#
xlist = np.linspace(0,bgimg.shape[1],80)
xlist = np.asarray(xlist,dtype=int)
imgstats = imagestats.ImageStats(bgimg[in_img[0],in_img[1]],nclip=3)
bg = imgstats.mean
bgsig = imgstats.stddev
if chatter > 2:
sys.stderr.write( 'background statistics: mean=%10.2f, sigma=%10.2f '%
(imgstats.mean, imgstats.stddev))
# create boolean image flagging good pixels
img_good = np.ones(extimg.shape,dtype=bool)
# flag area out of picture as bad
img_good[out] = False
# replace high values in image with estimate of mean and flag them as not good
for i in range(78):
# after the sigma screening this is a bit of overkill, leave in for now
sub_bg = boxcar(bgimg[:,xlist[i]:xlist[i+2]] , (5,5), mode='reflect', cval=cval)
sub_bg_use = np.where( np.abs(sub_bg - cval) > 1.0e-5 ) # list of coordinates
imgstats = None
if sub_bg_use[0].size > 0:
imgstats = imagestats.ImageStats(sub_bg[sub_bg_use],nclip=3)
# patch values in image (not out of image) with mean if outliers
aval = 2.0*imgstats.stddev
img_clip_ = (
(np.abs(bgimg[:,xlist[i]:xlist[i+2]]-cval) < 1e-6) |
(np.abs(sub_bg - imgstats.mean) > aval) |
(sub_bg <= 0.) | np.isnan(sub_bg) )
bgimg[:,xlist[i]:xlist[i+2]][img_clip_] = imgstats.mean # patch image
img_good[:,xlist[i]:xlist[i+2]][img_clip_] = False # flag patches
# the next section selects the user-selected or default background for further processing
if chatter > 1:
if background_method == 'boxcar':
sys.stderr.write( "BACKGROUND METHOD: %s; background smoothing = %s\n"%
(background_method,background_smoothing))
else:
sys.stderr.write( "BACKGROUND METHOD:%s\n"%(background_method ))
if not ((background_method == 'splinefit') | (background_method == 'boxcar') ):
sys.stderr.write('background method missing; currently reads : %s\n'%(background_method))
if background_method == 'boxcar':
# boxcar smooth in x,y using the global parameter background_smoothing
bgimg = boxcar(bgimg,background_smoothing,mode='reflect',cval=cval)
if background_lower[0] == None:
bg1 = bgimg[0:ny4,:].copy()
bg_limits_used[0]=0
bg_limits_used[1]=ny4
bg1_good = img_good[0:ny4,:]
kx0 = np.min(np.where(img_good[0,:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx1 = np.max(np.where(img_good[0,:]))-10
else:
# no curvature, no second order: limits
bg1_1= np.max(np.array([yloc_spectrum - sig1*background_lower[0],20 ]))
#bg1_0= np.max(np.array([yloc_spectrum - sig1*(background_lower[0]+background_lower[1]),0]))
bg1_0= np.max(np.array([yloc_spectrum - sig1*(background_lower[1]),0]))
bg1 = bgimg[int(bg1_0):int(bg1_1),:].copy()
bg_limits_used[0]=bg1_0
bg_limits_used[1]=bg1_1
bg1_good = img_good[int(bg1_0):int(bg1_1),:]
kx0 = np.min(np.where(img_good[int(bg1_0),:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx1 = np.max(np.where(img_good[int(bg1_0),:]))-10 # corrected for edge effects
#if ((kx2-kx0) < 20):
# print 'not enough valid upper background points'
if background_upper[0] == None:
bg2 = bgimg[-ny4:ny,:].copy()
bg_limits_used[2]=ny-ny4
bg_limits_used[3]=ny
bg2_good = img_good[-ny4:ny,:]
kx2 = np.min(np.where(img_good[ny-1,:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx3 = np.max(np.where(img_good[ny-1,:]))-10
else:
bg2_0= np.min(np.array([yloc_spectrum + sig1*background_upper[0],(slit_width-20) ]))
#bg2_1= np.min(np.array([yloc_spectrum + sig1*(background_upper[0]+background_upper[1]),ny]))
bg2_1= np.min(np.array([yloc_spectrum + sig1*(background_upper[1]),ny]))
bg2 = bgimg[int(bg2_0):int(bg2_1),:].copy()
bg_limits_used[2]=bg2_0
bg_limits_used[3]=bg2_1
bg2_good = img_good[int(bg2_0):int(bg2_1),:]
kx2 = np.min(np.where(img_good[int(bg2_1),:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx3 = np.max(np.where(img_good[int(bg2_1),:]))-10
#if ((kx3-kx2) < 20):
# print 'not enough valid upper background points'
if background_method == 'boxcar':
bg1 = bg1_dis = bg1.mean(0)
bg2 = bg2_dis = bg2.mean(0)
bg1_dis_good = np.zeros(nx,dtype=bool)
bg2_dis_good = np.zeros(nx,dtype=bool)
for i in range(nx):
bg1_dis_good[i] = bool(int(bg1_good[:,i].mean(0)))
bg2_dis_good[i] = bool(int(bg2_good[:,i].mean(0)))
if background_method == 'splinefit':
# mean bg1_dis, bg2_dis across dispersion
bg1_dis = np.zeros(nx) ; bg2_dis = np.zeros(nx)
for i in range(nx):
bg1_dis[i] = bg1[:,i][bg1_good[:,i]].mean()
if not bool(int(bg1_good[:,i].mean())):
bg1_dis[i] = cval
bg2_dis[i] = bg2[:,i][bg2_good[:,i]].mean()
if not bool(int(bg2_good[:,i].mean())):
bg2_dis[i] = cval
# some parts of the background may have been masked out completely, so
# find the good points and the bad points
bg1_dis_good = np.where( np.isfinite(bg1_dis) & (np.abs(bg1_dis - cval) > 1.e-7) )
bg2_dis_good = np.where( np.isfinite(bg2_dis) & (np.abs(bg2_dis - cval) > 1.e-7) )
bg1_dis_bad = np.where( ~(np.isfinite(bg1_dis) & (np.abs(bg1_dis - cval) > 1.e-7)) )
bg2_dis_bad = np.where( ~(np.isfinite(bg2_dis) & (np.abs(bg2_dis - cval) > 1.e-7)) )
# fit a smoothing spline to each background
x = bg1_dis_good[0]
s = len(x) - np.sqrt(2.*len(x))
if smo1 != None: s = smo1
if len(x) > 40: x = x[7:len(x)-7] # clip end of spectrum where there is downturn
w = np.ones(len(x))
tck1 = interpolate.splrep(x,bg1_dis[x],w=w,xb=bg1_dis_good[0][0],xe=bg1_dis_good[0][-1],k=3,s=s)
bg1 = np.ones(nx)
# ------------------------------------------ Import libraries ------------------------------------------#
import numpy as np
import pandas as pd
import re
from time import time, gmtime, strftime
from scipy.stats import itemfreq
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# ------------------------------------------ Settings and CONSTANTS ---------------------------------------------------#
from matplotlib import rcParams
rcParams['figure.figsize'] = 12, 8
# ------------------------------------------ General Helper functions -------------------------------------------------#
def array2df(X_train, feature_names):
"""Convert np array to df, use with correlation matrix"""
return pd.DataFrame(X_train, columns=feature_names)
def time_lap(start_time=None):
"""Stopwatch, No param = set, param (start) is time elapsed since start"""
if start_time == None:
return time()
return strftime("%Hh:%Mm:%Ss", gmtime(time() - start_time))
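# Illustrative sketch: the stopwatch is used by calling time_lap() once to mark
# the start, then calling it again with that mark to get the elapsed time string.
def _example_time_lap():
    start = time_lap()               # mark the start
    _ = sum(range(1000000))          # some work
    return time_lap(start)           # formatted elapsed time string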
def clean_locals(locals):
params = ''
try:
params = str(list(locals)[0]).replace('\n', '')
params = re.sub(' +', ' ', params)
params = re.search(r'\((.*)\)', params).group(1)
except:
pass
return params
def num_features(df):
return df.select_dtypes(include=np.number).columns.tolist()
def cat_features(df):
return df.select_dtypes(include=['object', 'category']).columns
def date_features(df):
return df.columns.str.extractall(r'(.*date.*)')[0].values.tolist()
def clip_outliers(values, p=99):
    """Clip values at the (100-p)-th and p-th percentiles (default 1st/99th)."""
    lower = np.percentile(values, 100 - p)
    upper = np.percentile(values, p)
    return np.clip(values, lower, upper)
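# Quick illustrative sketch (column names are made up) exercising the helpers above.
def _example_feature_helpers():
    df = pd.DataFrame({'age': [21, 35, 299, 28],
                       'city': ['a', 'b', 'b', 'c']})
    cols_num = num_features(df)       # ['age']
    cols_cat = cat_features(df)       # Index(['city'], dtype='object')
    clipped = clip_outliers(df['age'].values, p=75)   # clipped to the 25th/75th percentiles
    return cols_num, cols_cat, clipped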
import numpy as np
from numpy import linalg as LA
import random as rnd
from math import *
def rand(fCA=0):
# module returning random values as a tuple (x,x,z)
# one can tune the values here to make an artificial plan
x = rnd.choice((-1, 1)) * rnd.random() * 100
y = rnd.choice((-1, 1)) * rnd.random() * 100
z = rnd.choice((-1, 1)) * rnd.random() * 100 * fCA # fCA = c/a
return np.array([x, y, z])
def Centroid(r):
# derivation of centroid r0
N = len(r)
r0 = np.divide(np.sum(r, axis=0), N)
return r0
def PlaneFitting(r, D=3):
# module taking r (np.array) and dimensionality D of problem then returning the best fitting orthogonal eigen vector of an hypothetic plane as a vector tuple (x,y,z)
N = len(r)
r0 = Centroid(r)
T0 = np.zeros((3, 3))
i = 0
while i < N:
# loop calculating the moment of inertia tensor T0
rs = np.subtract(r[i], r0)
sq = np.dot(rs, rs)
id = np.multiply(sq, np.identity(3))
id = np.subtract(id, np.outer(rs, np.transpose(rs)))
T0 = np.add(T0, id)
i += 1
# returning the eigen values (w) and eigen vectors (v) of tensor T0
w, v = LA.eig(T0)
v = np.transpose(v)
# returning index of highest eigen value
i = np.argmax(w)
if D == 2:
w[i] = 0
# returning index of second highest eigen value
i = np.argmax(w)
return v[i]
if D == 1:
w[i] = 0
# returning index of second third eigen value
i = np.argmax(w)
return v[i]
return v[i]
def RndValArr(N=1000, fCA=0):
# module returning random values as N tuples (x,x,z)
tab = np.empty([N, 3])
for i in range(0, N):
val = rand(fCA)
tab[i] = val
i += 1
return tab
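# Illustrative check (not part of the original analysis): points generated with
# fCA=0 lie exactly in the z=0 plane, so the fitted normal should be close to
# (0, 0, 1) up to an overall sign.
def _example_PlaneFitting():
    r = RndValArr(N=500, fCA=0)
    normal = PlaneFitting(r, D=3)
    return normal          # expected ~ [0, 0, +/-1]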
def RndRotMat():
# module returning random 3D rotation matrix
a = rnd.choice((-1, 1)) * rnd.random() * 2 * np.pi
b = rnd.choice((-1, 1)) * rnd.random() * 2 * np.pi
c = rnd.choice((-1, 1)) * rnd.random() * 2 * np.pi
Rotx = ([[1,0,0],[0,np.cos(a),-np.sin(a)],[0,np.sin(a),np.cos(a)]])
Roty = ([[np.cos(b),0,np.sin(b)],[0,1,0],[-np.sin(b),0,np.cos(b)]])
Rotz = ([[np.cos(c),-np.sin(c),0],[np.sin(c),np.cos(c),0],[0,0,1]])
matrix = np.matmul(Rotx,Roty)
matrix = np.matmul(matrix,Rotz)
return matrix
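# Small sanity sketch: a rotation matrix is orthogonal, so R.R^T should be the
# identity up to numerical round-off (illustrative check only).
def _example_RndRotMat():
    R = RndRotMat()
    return np.allclose(np.matmul(R, R.T), np.identity(3))   # expected: True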
def GalCoord(n):
# module returning a cartesian coordinate vector n(x,y,z) into a galactic coordinate vector n(l,b) (in deg°)
x = n[0]
y = n[1]
z = n[2]
if x != 0:
l = np.arctan(y / x)
l = np.rad2deg(l)
else:
l = 0.0
if x != 0 or y != 0: # undefined only when both x and y are zero (the pole)
b = np.arctan(z / sqrt(x * x + y * y))
b = np.rad2deg(b)
else:
b = 90.0
return np.array([l, b])
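# Illustrative conversions (not part of the original analysis): a vector in the
# x-y plane at 45 degrees maps to (l, b) = (45, 0); the +z axis maps to b = 90.
def _example_GalCoord():
    return GalCoord(np.array([1.0, 1.0, 0.0])), GalCoord(np.array([0.0, 0.0, 1.0]))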
def GalPlot(r, n = ([0,0,0]), size = 100, title = ""): # plot galactic plan given galaxies positions r(x,y,z) and normal vector n(x,y,z)
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
r0 = Centroid(r)
fig = plt.figure()
ax = plt.axes(projection="3d")
z_points = []
x_points = []
y_points = []
i = 0
while i < len(r):
z_points.append(r[i, (2)])
x_points.append(r[i, (0)])
y_points.append(r[i, (1)])
i = i + 1
ax.scatter3D(x_points, y_points, z_points, c=z_points, cmap='hsv') # plotting galaxies position
n = np.multiply(n, size)
plt.plot([r0[0], r0[0] + n[0]], [r0[1], r0[1] + n[1]], [r0[2], r0[2] + n[2]]) # plotting normal
plt.title(title)
plt.show()
return ()
def main():
Ngal = 10
Nmin = 0
Nmax = 30
stat = 10000
CA = 0.5
result = np.zeros([Nmax - Nmin + 1, 3])
tracker = 0
for Galpol in range(Nmin, Nmax + 1):
i = Galpol + Ngal
Nn = []
Nnproj = []
rproj = np.zeros([i, 3])
for k in range(0, stat):
rndproj = rnd.choice((0, 1, 2))
matrot = RndRotMat()
normal = [0, 0, 1]
normalext = np.zeros([3, 3])
normalext[0] = normal
normalext = np.matmul(normalext, matrot)
normal = normalext[0]
#r = RndValArr(i, CA)
r = np.concatenate((RndValArr(Ngal,CA),RndValArr(Galpol,1)))
for j in range(0, i-1):
riext = np.zeros([3, 3])
riext[0] = r[j]
riext = np.matmul(riext,matrot)
r[j] = riext[0]
projvector = np.zeros([1, 3])
if rndproj == 0:
projvector = ([[0,1,1]])
elif rndproj == 1:
projvector = ([[1, 0, 1]])
elif rndproj == 2:
projvector = ([[1,1,0]])
rproj = np.multiply(r, projvector)
from copy import deepcopy
import re
import numpy as np
'''
Board class for the game of TicTacToe.
Default board size is 3x3.
Board data:
1=white(O), -1=black(X), 0=empty
first dim is column , 2nd is row:
pieces[0][0] is the top left square,
pieces[2][0] is the bottom left square,
Squares are stored and manipulated as (x,y) tuples.
Author: <NAME>, github.com/evg-tyurin
Date: Jan 5, 2018.
Based on the board for the game of Othello by <NAME>.
'''
# from bkcharts.attributes import color
class Board():
# list of all 8 directions on the board, as (x,y) offsets
__directions = [(1,1),(1,0),(1,-1),(0,-1),(-1,-1),(-1,0),(-1,1),(0,1)]
SIZE = 4
def __init__(self, n=SIZE, initial = 0):
"Set up initial board configuration."
self.n = n
# Create the empty board array.
self.pieces = [None]*self.n
for i in range(self.n):
self.pieces[i] = [initial]*self.n
self.pieces = np.array(self.pieces)
self.mask_pieces=[]
def __str__(self):
return str(self.pieces)
# add [][] indexer syntax to the Board
def __getitem__(self, index):
#print('Tgus us index', index,self.pieces)
return self.pieces[index]
def get_mask_pieces(self):
ind = np.where(self.pieces==0)
self.mask_pieces = [(ind[0][i],ind[1][i]) for i in range(ind[0].shape[0])]
def get_legal_moves(self,final=True,valid = True):
"""Returns all the legal moves for the given color.
(1 for white, -1 for black)
@param color not used and came from previous version.
"""
moves = set() # stores the legal moves.
states_to_moves = {}
# Get all the empty squares (color==0)
for y in range(self.n):
for x in range(self.n):
# if the piece is removed in leaf based search, mask it
if self[x][y]==0 and (x,y) not in self.mask_pieces:
newmove = (x,y)
moves.add(newmove)
# following codes is to ignore suicide moves
moves = list(moves)
#print self.pieces
copy = deepcopy(self)
all_possible_states = []
for m in moves:
self = deepcopy(copy)
self.execute_move(m,1)
states_to_moves[str(self)]=m
all_possible_states.append(self)
if valid == True:
all_possible_states = self.validatePossibleMoves(all_possible_states,final)
# print('all_possible_states',all_possible_states)
# print(states_to_moves[str(all_possible_states[0])])
# print('length of states',len(all_possible_states))
if all_possible_states ==[]:
mask_pieces = deepcopy(copy.mask_pieces)
copy.mask_pieces = []
return mask_pieces
self = deepcopy(copy)
return [states_to_moves[str(s)] for s in all_possible_states]
def validatePossibleMoves(self,all_possible_states,final):
winners = []
for state in all_possible_states:
winners.append(state.is_win(1))
if final:
if all(win == True for win in winners):
return all_possible_states
else:
return [all_possible_states[w] for w in range(len(winners)) if winners[w] == 0]
else:
if all(win == True for win in winners):
return []
else:
return [all_possible_states[w] for w in range(len(winners)) if winners[w] == 0]
def Randomly_remove(self, depth = 2):
'''
remove pieces from board in random based on depth
'''
#print 'board:',self
# return array of index of all pieces on the board and
#state_copy = np.copy(self.pieces)
indice = np.where(self.pieces == 1)
depth = int(self.pieces.sum()/2)
#print 'indice', indice
# choose (depth) index at random
removed_indice = np.random.choice(indice[0].shape[0],depth,replace=False)
for i in removed_indice:
x,y = indice[0][i], indice[1][i]
#print x,y
self[x][y]=0
return self
def All_remove(self):
'''
remove pieces from board in random based on depth
'''
#print 'board:',self
# return array of index of all pieces on the board and
#state_copy = np.copy(self.pieces)
self.pieces = np.zeros((Board.SIZE,Board.SIZE))
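# Illustrative sketch (not part of the original game engine): build a small board,
# place two pieces directly, and list the squares that are still empty.
def _example_board_mask():
    b = Board(n=3, initial=0)
    b[0][0] = 1
    b[1][1] = -1
    b.get_mask_pieces()
    return b.mask_pieces      # (x, y) tuples of the remaining empty squares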
#!/usr/bin/python3
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import sys
import pprint
import numpy as np
import pickle
import itertools
import config
from agents.learning import LearningAgent
from environment.bandit import Bandit
from experiments.experiment import Experiment, ParallelExperiment
from agents.gaussian import *
from agents.uniform import *
from agents.pruning import *
from agents.delegator import Delegator
from util import meeting_point
def main():
config_obj = config.Config.get_instance()
settings = config_obj.parse(sys.argv[1])
expNumber = int(sys.argv[2])
team_sizes = settings['team_sizes']
bandit_sizes = settings['bandit_sizes']
mus = settings['mus']
sigmas = settings['sigmas']
trials = settings['trials']
executions = settings['executions']
experiments = []
#exp_dict = {}
# values of the prob. distribution of agent generation
# it vary in nature if we're dealing with gaussian or uniform
dist_params = settings['upper_bounds']
if settings['ltd_type'] == 'gaussian':
# must use list comprehension otherwise generator is consumed in 1st use
dist_params = [x for x in itertools.product(settings['mus'], settings['sigmas'])]
#name = "results_gaussian"
print('Parameters:')
pprint.PrettyPrinter().pprint(settings)
# execution rewards
execution_rwd_lta = np.zeros((executions, trials))
'''
Created on Oct 25, 2017
@author: cef
'''
import logging, copy, os, time, inspect
import numpy as np
import pandas as pd
import xlrd #this is here to test the optional dependency
#import matplotlib.pyplot as plt
from collections import OrderedDict
#===============================================================================
# other helpers
#===============================================================================
import model.sofda.hp.basic as basic
mod_logger = logging.getLogger(__name__) #creates a child logger of the root
#===============================================================================
# pandas styling
#===============================================================================
pd.set_option("display.max_columns", 7)
pd.set_option('display.width', 150)
def view_web_df(df):
import webbrowser
#import pandas as pd
from tempfile import NamedTemporaryFile
with NamedTemporaryFile(delete=False, suffix='.html', mode='w') as f:
#type(f)
df.to_html(buf=f)
webbrowser.open(f.name)
def view(df):
view_web_df(df)
#===============================================================================
# # SIMPLE MATH ------------------------------------------------------------------
#===============================================================================
def sum_occurances(df_raw, logger=mod_logger): #return a dictionary of counts per occurance
"""
try value_counts()?
"""
logger = logger.getChild('sum_occurances')
df = df_raw.copy()
l = np.unique(df).tolist()
d = dict()
#=======================================================================
# loop and calc
#=======================================================================
logger.debug('looping through %i unique values: %s'%(len(l), l))
for val in l:
d[val] = (df == val).sum().sum()
return d
#===============================================================================
# INTERPOLATION and SEARCH --------------------------------------------------------------
#===============================================================================
def Get_interp_header_dx2ser(df_raw, header_search, value_ask_raw, logger=mod_logger): # Vlookup. return row (series) at [value_ask, header_search]
""""
Most robust interpolator
#===========================================================================
# USE
#===========================================================================
interp_ser = hp_pd.Get_interp_header_dx2ser(df_raw, header_search, value_ask_raw)
interp_value = interp_ser[header_desire]
#===========================================================================
# FUNCTION
#===========================================================================
add a row and interpolate all values. return row (series) at [value_ask, header_search]'
'returns the whole row, which has been interpolated by the axis passed in the header_search
allows for non-index interpolation (useful for aribtrary indexes)
for time series (or other non-aribitrary indexes), see Get_interp_index_df2ser
#===========================================================================
# INPUTS
#===========================================================================\
df_raw: data set (with header_search in columns)
header_search = the header name from which to search for the value_ask
value_ask_raw: numeric value (on the header_search's column) from which to interoplate the other rows
#===========================================================================
# TESTING:
#===========================================================================
import sim.debug
df = sim.debug.Get_curve_df()
"""
'TODO: convert df index to float'
'TODO: check if the passed header is in the columns'
#===========================================================================
# check inputs
#===========================================================================
if not isinstance(df_raw, pd.core.frame.DataFrame):
logger.error('got undexpected type on df_raw: %s'%type(df_raw))
raise TypeError
#drop nan values
df_raw = df_raw.dropna(axis='index')
value_ask = round(value_ask_raw, 2)
#check if thsi value is outside of the passed column
df = df_raw.astype(float) #convert all values to floats
'there seems to be some problem with importing commas from excel'
df_sort = df_raw.sort_values(by=header_search).reset_index(drop='true')
if value_ask < df_sort.loc[0,header_search]:
logger.error('asked value is outside the domain')
return df_sort.loc[0,:]
last_index = len(df_sort.index) -1
if value_ask > df_sort.loc[last_index, header_search]:
logger.error('asked value is greater than the serach domain: %.2f'%value_ask)
return df_sort.iloc[last_index,:] #return the last row
#check if interpolation is even needed
bool_row = df_raw.loc[:,header_search] == value_ask #search for value
if sum(bool_row) == 1: #found one match
results_ser = df_raw.loc[bool_row,:].iloc[0] #get this row
return results_ser
elif sum(bool_row) >1: #found multiple matches
df_trim = df_raw.loc[bool_row,header_search]
logger.error('found too many existing matches in search: \n %s'%df_trim)
raise ValueError
#builda new df with the header_search as the index
'I think there is a better command for this'
index = list(df_raw.loc[:,header_search])
bool_col = df_raw.columns != header_search #get the remaining
col = df_raw.columns[bool_col]
data = df_raw.loc[:,bool_col].values #get all this data
df = pd.DataFrame(data = data, index = index, columns = col )
ser = pd.Series(data=None, index= col) #dummy row for adding
df.loc[value_ask,:] = ser #add this in at teh requested row
#resort the frame
df_interp = df.sort_index()
#convert each value to numeric
for col in df_interp: df_interp[col] = pd.to_numeric(df_interp[col], errors='coerce')
#interpolate the missing values
'WARNING: all methods (except linear) interpolate based on the index'
df_new = df_interp.interpolate(method='values')
#Extract interpolated row
results_ser = df_new.loc[value_ask] #get the results row
results_ser.loc[header_search] = value_ask #add teh search value/header back
return results_ser
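# Minimal illustrative sketch of the vlookup-style interpolation above: build a
# small depth-damage curve (made-up numbers) and ask for the row interpolated at
# an intermediate depth.
def _example_Get_interp_header_dx2ser():
    curve_df = pd.DataFrame({'depth': [0.0, 1.0, 2.0],
                             'damage': [0.0, 10.0, 40.0]})
    row = Get_interp_header_dx2ser(curve_df, 'depth', 0.5)
    return row['damage']      # expected 5.0 (linear between 0.0 and 10.0)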
def Get_interp_ser(ser_raw, index_ask_raw, logger=mod_logger): #get the corresponding value for the passed index using the series
"""TESTING
index_ask = 52
"""
logger.debug('performing interpolation for index_ask: %.2f'%index_ask_raw)
#===========================================================================
# check inputs
#===========================================================================
if not isinstance(ser_raw, pd.core.series.Series):
logger.error('got undexpected type on ser_raw: %s'%type(ser_raw))
raise TypeError
'todo: check that the index is numeric'
index_ask = float(round(index_ask_raw, 2)) #round/convert ask
ser = ser_raw.copy(deep=True)
ser.index = ser_raw.index.astype(float) #convert the index to floats
#===========================================================================
#check if interpolation is even needed
#===========================================================================
bool = ser.index == index_ask #search for value
if sum(bool) == 1: #found one match
value = float(ser[index_ask]) #get this row
return value
elif sum(bool) >1: #found multiple matches
logger.error('found too many existing matches in search: \n %s'%ser)
raise ValueError
#===========================================================================
# perform inerpolation
#===========================================================================
ser.loc[index_ask] = np.nan #add the dummy search value
#resort the frame
ser = ser.sort_index()
#convert each value to numeric
#for value in df_interp: df_interp[col] = pd.to_numeric(df_interp[col], errors='coerce')
ser_interp = ser.interpolate(method='values')
#get the requested value
value = float(ser_interp[index_ask])
return value
def Get_interp_index_df2ser(df_raw, index_ask_raw, logger=mod_logger): #get the corresponding value for the passed index using the series
"""
#===========================================================================
# INPUTS
#===========================================================================
df_raw: dataframe with numeric or time series index
index_ask_raw: new index value where you want a row to be interpolated on
#===========================================================================
# LIMITATION
#===========================================================================
I believe the linear interpolater just splits the difference between the bounding rows
TESTING
index_ask = 52
type(df_raw)
"""
logger.debug('performing interpolation for index_ask: %s'%index_ask_raw)
#===========================================================================
# check inputs
#===========================================================================
if not isinstance(df_raw, pd.core.frame.DataFrame):
logger.error('got undexpected type on df_raw: %s'%type(df_raw))
raise TypeError
if index_ask_raw < df_raw.index[0]:
logger.error('got an index outside the bounds of the passed frame: \n %s'%df_raw.index)
raise IOError
index_ask = index_ask_raw
#===========================================================================
# # check if interpolation is even needed
#===========================================================================
bool = df_raw.index == index_ask #search for value
if sum(bool) == 1: #found one match
ser = df_raw.loc[bool].iloc[0] #get this row
return ser
elif sum(bool) >1: #found multiple matches
logger.error('found too many existing matches in search: \n %s'%index_ask)
raise ValueError
#===========================================================================
# perform inerpolation
#===========================================================================
new_row = pd.DataFrame(index = [index_ask], columns = df_raw.columns)
df = df_raw.append(new_row).sort_index()
df_interp = df.interpolate(method='linear')
#get the requested value
ser = df_interp.loc[index_ask, :]
return ser
def gen_ser_keyvalue(keys_ser, kv_df, value_head = 'rank', logger=mod_logger): #generate a response series
"""
#===========================================================================
# INPUTS
#===========================================================================
keys_ser: series of values on which to generate a response from the kv mapping
kv_df: dictionary like frame where
col1_values = keys (found in keys_ser)
col2_values = values corresponding to the keys
search_header: header defining the values column
"""
logger = logger.getChild('gen_ser_keyvalue')
#===========================================================================
# build the dictionary
#===========================================================================
key_head = keys_ser.name #header of keys
if not header_check(kv_df, [key_head,value_head ]): raise IOError
#efficietnly build a dictionary from two columns
keys = kv_df.loc[:,key_head].values
values = kv_df.loc[:,value_head].values
dictionary = pd.Series(values,index=keys).to_dict()
#===========================================================================
# get the values
#===========================================================================
df = pd.DataFrame(keys_ser)
df[value_head] = df.iloc[:,0] #create a second column and fill it with the key entries
df1 = df.replace({value_head: dictionary }) #replace the entries with the dictionary
return df1[value_head]
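# Illustrative sketch: map a series of category keys onto ranks defined in a
# small lookup frame (made-up values).
def _example_gen_ser_keyvalue():
    keys_ser = pd.Series(['low', 'high', 'low', 'med'], name='class')
    kv_df = pd.DataFrame({'class': ['low', 'med', 'high'],
                          'rank':  [1, 2, 3]})
    return gen_ser_keyvalue(keys_ser, kv_df, value_head='rank')   # -> 1, 3, 1, 2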
def search_str_fr_list(ser, #find where items have all the items in the search_str_list
search_str_list, case=False, all_any = 'all',
logger=mod_logger):
"""
#===========================================================================
# INPUTS
#===========================================================================
all_any: flag denoting how to to treat the combined conditional
all: find rows where every string in the list is there
any: find rows where ANY string in the list is there
"""
logger = logger.getChild('search_str_fr_list')
if not isser(ser): raise IOError
#starter boolidx series
df_bool = pd.DataFrame(index = search_str_list, columns = ser.index)
#loop through and find search results for each string
for index, row in df_bool.iterrows():
boolcol = ser.astype(str).str.contains(index, case=case)
df_bool.loc[index,:] = boolcol.values.T
#find the result of all rows
if all_any == 'all':
boolidx = df_bool.all()
elif all_any == 'any':
boolidx = df_bool.any()
else:
logger.error('got unexpected kwarg for all_any: \'%s\''%all_any)
raise IOError
logger.debug('found %i series match from string_l (%i): %s'
%(boolidx.sum(), len(search_str_list), search_str_list))
if boolidx.sum() == 0:
logger.warning('no matches found')
return boolidx
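# Illustrative sketch: keep only the rows whose text contains every search string
# (all_any='all'); with all_any='any' a single match is enough.
def _example_search_str_fr_list():
    ser = pd.Series(['red house', 'blue house', 'red car'], name='desc')
    boolidx = search_str_fr_list(ser, ['red', 'house'], all_any='all')
    return ser[boolidx]       # only 'red house' contains both strings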
#===============================================================================
# IMPORTS --------------------------------------------------------------------
#===============================================================================
def slice_df_from_file(df_raw, slicefile_path, #slice a df with values found in an external slicefile
spcl_srch = '*', drop_omits=True, logger=mod_logger):
"""
#===========================================================================
# ARCHITECTURE
#===========================================================================
The external slicefile contains the logic on which to slice:
slicing columns: any columns NOT found in the slicefile will be sliced OUT
slicing rows: if only a header is provided in the slice file, no data will be sliced
if values are provided under a header in the slice file, data not matching this will be sliced OUT
values ending in * will be searched for any strings containing the value
#===========================================================================
# INPUTS
#===========================================================================
slicefile_path: file path to csv with headers and values on which to slice
df_raw: df to slice with slicefile data
drop_omits: flag for header omission treatment
True: if the header is missing, the column is dropped
False: do nothing on missing headers
#===========================================================================
# TESTING
#===========================================================================
view_df(df_raw)
"""
logger = logger.getChild('slice_df_from_file')
logger.debug('on df_raw (%i cols x %i indx) from slice_file:\n %s'
%(len(df_raw.columns), len(df_raw.index), slicefile_path))
#===========================================================================
# #check and attach slice file
#===========================================================================
if not os.path.exists(slicefile_path):
if slicefile_path is None:
logger.error('no slicefile entered')
raise IOError
logger.error('passed slicefile does not exist: \n %s'%slicefile_path)
raise IOError
#load the slice file
'imports without headers. headers are kept on row 0 for manipulation as data'
slicefile_df_raw = pd.read_csv(slicefile_path,
header = None,
index_col=False,
skipinitialspace=True)
#===========================================================================
# clean the slice file
#===========================================================================
#strip leading/trailing zeros from the first row
slicefile_df_clean0 = slicefile_df_raw.copy(deep=True)
slicefile_df_clean0.iloc[0,:] = slicefile_df_raw.iloc[0,:].str.strip()
#drop any rows with all nan
slicefile_df_clean1 = slicefile_df_clean0.dropna(axis='index', how='all')
slicefile_df_clean2 = slicefile_df_clean1.dropna(axis='columns', how='all').reset_index(drop='true') #drop any columns with all na
#strip trailing zerios
slicefile_df_clean = slicefile_df_clean2
if len(slicefile_df_clean) == 0:
logger.info('no values selected for slicing. returning raw frame')
return df_raw
#===========================================================================
# slice columns
#===========================================================================
slice_head_list = slicefile_df_clean.iloc[0,:].values.tolist() #use the first row
#make sure all these passed headers are found in the data
if not header_check(df_raw, slice_head_list, logger = logger):
os.startfile(slicefile_path)
raise IOError
if drop_omits:
boolhead = df_raw.columns.isin(slice_head_list)
df_head_slice = df_raw.loc[:,boolhead] #get just the headers found in the first row of the slicer
else:
df_head_slice = df_raw.copy(deep=True)
#===========================================================================
# #manipulate the slice df to match header dtype as expected
#===========================================================================
#boolcol_indata = slicefile_df_clean.columns.isin(df_raw.columns)
slicefile_df2_clean = slicefile_df_clean.copy(deep=True)
slicefile_df2_clean.columns = slicefile_df2_clean.iloc[0,:] #reset teh columns
slicefile_df2_clean = slicefile_df2_clean.iloc[1:,:] #drop the first row
if len(slicefile_df2_clean) == 0:
logger.warning('no slicing values provided')
#=======================================================================
# build the boolidx from teh slicefile_df
#=======================================================================
#start with bool of all trues
boolidx_master = pd.DataFrame(~df_raw.loc[:,slicefile_df2_clean.columns[0]].isin(df_head_slice.columns))
for header, col in slicefile_df2_clean.items(): #loop through each column and update the logical df
slice_list = list(col.dropna())
if len(slice_list) == 0: #check that we even want to slice this
logger.debug('no slicing values given for header %s'%header)
continue
boolcol = df_head_slice.columns == header #select just this column
#=======================================================================
# build the slice boolidx
#=======================================================================
if spcl_srch in str(slice_list): #check for the special search flag
#===================================================================
# special search
#===================================================================
boolidx = ~boolidx_master.iloc[:,0]
for search_value in slice_list:
boolidx_new = df_head_slice[header].astype(str) == search_value
if isinstance(search_value, str):
if search_value.endswith('*'): #special search
new_sv = search_value[:-1] #drop the asterisk
#overwrite the boolidx
boolidx_new = df_head_slice[header].astype(str).str.contains(new_sv)
logger.debug("contains search found %i entries on header %s"%
(boolidx.sum(), header))
else: pass
else: pass #just leave the original boolidx
boolidx = np.logical_or(boolidx, boolidx_new) #get the logical cmululation of all these
logger.info('special search on \'%s\' found %i entries'%(header, boolidx.sum()))
boolidx = pd.DataFrame(boolidx)
else: #normal search
boolidx = df_head_slice.loc[:,boolcol].astype(str).isin(slice_list) #find the rows where (on this column) the values are contained in the list
'''
df_head_slice.loc[:,boolcol]
'''
#check that we havent sliced all the data
if not boolidx.any(axis=0)[0]:
logger.debug('slicefile_path: \n %s'%slicefile_path)
logger.warning('slicing col \'%s\' found no rows with values \n %s'%(header, slice_list))
boolidx_master = np.logical_and(boolidx_master, boolidx) #get df of logical AND combination of two child dfs
logger.debug('on header: %s slicing found %i entries matching: \n %s'%(header, len(boolidx), slice_list))
#=======================================================================
# get final slice
#=======================================================================
df_slice = df_head_slice.loc[boolidx_master.iloc[:,0],:]
if len(df_slice.index) < 1:
logger.error('sliced out all rows')
raise IOError
if len(df_slice.columns) <1:
logger.error('sliced out all columns')
raise IOError
cleaner_report(df_raw, df_slice, cleaner_name ='slice', logger= logger)
return df_slice
def load_smart_df(filepath, logger = mod_logger, **kwargs):
if filepath.endswith('.csv'):
return load_csv_df(filepath, logger = logger, **kwargs)
if filepath.endswith('.xls'):
return load_xls_df(filepath, logger = logger, **kwargs)
else:
raise IOError
def load_csv_df(filepath,
test_trim_row = None, #for partial loading, stop at this line
header = 0, #Row number(s) to use as the column names, and the start of the data.
#for dxcol, pass a list of the column names
index_col = 0, #column to use as the row labels
skip_blank_lines = True,
skipinitialspace = True,
skiprows = None,
parse_dates=True,
sep = ',',
logger=mod_logger,
**kwargs):
#===========================================================================
# defaults
#===========================================================================
logger = logger.getChild('load_csv_df')
if not test_trim_row is None: test_trim_row = int(test_trim_row)
#===========================================================================
# prechecks
#===========================================================================
if not os.path.exists(filepath):
logger.error('passed filepath does not exist: %s'%filepath)
raise IOError
if not filepath.endswith('.csv'):
raise IOError
if (not isinstance(header, int)) and (not header is None): #normal state
if not isinstance(header, list):
raise IOError
else:
for v in header:
if not isinstance(v, int):
raise IOError
"""
header = [0,1]
hp_pd.v(df_raw)
"""
try: #default engine
df_raw = pd.read_csv(filepath,
header = header, index_col=index_col, skip_blank_lines = skip_blank_lines,
skipinitialspace=skipinitialspace, skiprows = skiprows,
parse_dates=parse_dates,sep = sep,
**kwargs)
except:
try: #using the python engine
df_raw = pd.read_csv(open(filepath,'rU'), encoding='utf-8', engine='python',
header = header, index_col=index_col,skip_blank_lines = skip_blank_lines,
skipinitialspace=skipinitialspace, skiprows = skiprows,
parse_dates=parse_dates,sep = sep,
**kwargs)
logger.debug('loaded successfully using python engine')
except:
logger.error('failed to load data from %s'%filepath)
raise IOError
logger.debug('loaded df %s from file: \n %s'%(df_raw.shape, filepath))
#=======================================================================
# trim for testing flag
#=======================================================================
if not test_trim_row is None:
df = df_raw.iloc[0:test_trim_row,:] #for testing, just take the first 100 rows
logger.warning('TEST FLAG=TRUE. only loading the first %i rows'%test_trim_row)
else:
df = df_raw.copy(deep=True)
#===========================================================================
# format index
#===========================================================================
df1 = df.copy(deep=True)
try:
df1.index = df.index.astype(np.int)
except:
logger.warning('failed to convert index to numeric')
return df1
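#===========================================================================
# illustrative usage sketch (not from the original module): a hypothetical call
# to load_csv_df. The file path and kwargs are assumptions; pd/np and mod_logger
# are assumed to be imported at the top of this module.
#===========================================================================
def _demo_load_csv_df():
    #load only the first 50 rows of an assumed csv, keeping the default index column
    df = load_csv_df('C:\\data\\example.csv', test_trim_row=50)
    print(df.shape)
    return df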
def load_xls_df(filepath, logger=mod_logger,
test_trim_row = None,
sheetname = 0,
header = 0, #Row number(s) to use as the column names, and the start of the data.
index_col = 0,
parse_dates=False,
skiprows = None,
convert_float = False,
**kwargs):
"""
#===========================================================================
# INPUTS
#===========================================================================
sheetname: None returns a dictionary of frames
0 returns the first tab
#===========================================================================
# KNOWN ISSUES
#===========================================================================
converting TRUE/FALSE to 1.0/0.0 for partial columns (see pandas. read_excel)
"""
#===========================================================================
# defaults
#===========================================================================
logger = logger.getChild('load_xls_df')
if not test_trim_row is None: test_trim_row = int(test_trim_row)
#===========================================================================
# prechecks
#===========================================================================
if not filepath.endswith('.xls'): raise IOError('got unexpected file extension: \'%s\''%filepath[-4:])
if not isinstance(filepath, str): raise IOError
'todo: add some convenience methods to append xls and try again'
if not os.path.exists(filepath):
raise IOError('passed filepath not found: \n %s'%filepath)
if parse_dates: raise IOError #not implemented apparently..
#===========================================================================
# loader
#===========================================================================
try:
df_raw = pd.read_excel(filepath,
sheet_name = sheetname,
header = header,
index_col = index_col,
skiprows = skiprows,
parse_dates = parse_dates,
convert_float = convert_float,
engine = None,
formatting_info = False,
verbose= False,
**kwargs)
except:
raise IOError('unable to read xls from: \n %s'%filepath)
#===========================================================================
# post checks
#===========================================================================
if not isdf(df_raw):
if not sheetname is None: raise IOError
if not isinstance(df_raw, dict): raise IOError
logger.debug('sheetname = None passed. loaded as dictionary of frames')
df_dict = df_raw
for tabname, df_raw in df_dict.items():
#=======================================================================
# trim for testing flag
#=======================================================================
df = None
if not test_trim_row is None:
if test_trim_row < len(df_raw):
df = df_raw.iloc[0:test_trim_row,:] #for testing, just take the first 100 rows
logger.warning('TEST FLAG=TRUE. only loading the first %i rows'%test_trim_row)
if df is None:
df = df_raw
df_dict[tabname] = df #update the dictionary
return df_dict
logger.debug('loaded df %s from sheet \'%s\' and file: \n %s'%(df_raw.shape, sheetname, filepath))
#=======================================================================
# trim for testing flag
#=======================================================================
df = None
if not test_trim_row is None:
if test_trim_row < len(df_raw):
df = df_raw.iloc[0:test_trim_row,:] #for testing, just take the first 100 rows
logger.warning('TEST FLAG=TRUE. only loading the first %i rows'%test_trim_row)
if df is None:
df = df_raw
return df
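#===========================================================================
# illustrative usage sketch (not from the original module): a hypothetical call
# to load_xls_df. The file path and tab name are assumptions.
#===========================================================================
def _demo_load_xls_df():
    #load a single tab by name; passing sheetname=None would instead return a dict of frames
    df = load_xls_df('C:\\data\\inventory.xls', sheetname='tab1', test_trim_row=20)
    print(df.columns.tolist())
    return df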
def load_xls_d(filepath, #load a xls collection of tabs to spreadsheet
logger=mod_logger, **kwargs):
#===========================================================================
# defaults
#===========================================================================
logger = logger.getChild('load_xls_d')
#===========================================================================
# setup
#===========================================================================
df_d = OrderedDict() #create the dictionary for writing
#get sheet list name
xls = xlrd.open_workbook(filepath, on_demand=True)
sheetnames_list = xls.sheet_names()
logger.debug('with %i sheets from %s: %s'%(len(sheetnames_list),filepath, sheetnames_list))
for sheetname in sheetnames_list:
logger.debug('on sheet: \'%s\' \n'%sheetname)
#pull the df from the file and do the custom parameter formatting/trimming
df_raw = load_xls_df(filepath, sheetname = sheetname,
logger = logger,
**kwargs)
if len(df_raw) < 1:
logger.error('got no data from tab \'%s\' in %s '%(sheetname, filepath))
raise IOError
df_d[sheetname] = df_raw #update the dictionary
logger.debug('loaded %i dfs from xls sheets: %s'%(len(list(df_d.keys())), list(df_d.keys())))
return df_d
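#===========================================================================
# illustrative usage sketch (not from the original module): pulling every tab of
# an assumed workbook into an OrderedDict keyed by sheet name via load_xls_d.
#===========================================================================
def _demo_load_xls_d():
    df_d = load_xls_d('C:\\data\\inventory.xls')
    for tabname, df in df_d.items():
        print(tabname, df.shape)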
#===============================================================================
# CONVERSIONS ----------------------------------------------------------------
#===============================================================================
def force_dtype_from_tplate(df_raw, templatefile_path, logger=mod_logger): #force the df column dtype to match that provided in the template
"""
#===========================================================================
# INPUTS
#===========================================================================
templatefile_path: filepath to a csv with row0: header, row1: dtype (str, float, int, datetime, None)
df: dataframe on which to map the passed type
#===========================================================================
# TESTING
#===========================================================================
view_df(df_raw)
"""
logger=logger.getChild('dtype_tplate')
logger.debug('force_dtype_from_tplate from template file:\n %s'%templatefile_path)
#===========================================================================
# checks
#===========================================================================
if not os.path.exists(templatefile_path): raise IOError
if not isdf(df_raw): raise IOError
#load the template file
template_df = pd.read_csv(templatefile_path,
header = 0,
index_col=False)
template_ser = pd.Series(template_df.iloc[0,:])
""" Im leaving this out for testing as Im using the large data format template
#check that the headers match
DFColumnCheck(template_df,df.columns)
"""
df = df_raw.copy(deep=True)
for header, col in df_raw.items(): #loop through each column
if not header in template_ser.index:
logger.warning('could not find \' %s \' in the template file'%header)
continue
dtype_str = template_ser[header]
if pd.isnull(dtype_str):
logger.debug('no dtype found in template for %s'%header)
continue
elif dtype_str == 'datetime':
df.loc[:,header] = pd.to_datetime(col) #send this back to the df as a datetime
elif dtype_str == 'float':
try:
df.loc[:,header] = col.astype(np.float64, copy=False)
except:
logger.error('failed to convert header %s to float'%header)
raise IOError
elif dtype_str == 'int':
try:
df.loc[:,header] = col.astype(np.dtype(int), copy=False)
except:
logger.error('failed to convert header %s to integer from tplate file: \n %s'%(header,templatefile_path ))
raise IOError
elif dtype_str == 'str':
df.loc[:,header] = col.astype(np.dtype(str), copy=False)
else:
logger.warning('got unexpected value for dtype = %s on %s' %(dtype_str, header))
raise IOError
logger.debug('changed dtype on %s to %s'%(header, dtype_str))
#===========================================================================
# check
#===========================================================================
df_check = df.dropna(axis='columns', how='all')
df_raw_chk = df_raw.dropna(axis='columns', how='all')
logger.debug('dropped nas and checking if we lost any columns')
if not len(df_check.columns) == len(df_raw_chk.columns):
cleaner_report(df_check, df_raw, cleaner_name = 'clean_df_frmt ERROR', logger = logger)
logger.error('some columns were lost in the formatting')
raise IOError
return df
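#===========================================================================
# illustrative usage sketch (not from the original module): force_dtype_from_tplate
# with a hypothetical template csv (row0: headers, row1: dtypes, as per the
# docstring above). The template path and data values are assumptions.
#===========================================================================
def _demo_force_dtype_from_tplate():
    df_raw = pd.DataFrame({'id': ['1', '2'], 'depth': ['0.5', '1.2'], 'name': ['a', 'b']})
    #the assumed template.csv would contain two rows: 'id,depth,name' and 'int,float,str'
    df = force_dtype_from_tplate(df_raw, 'C:\\data\\template.csv')
    print(df.dtypes)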
def df_to_1line_dict(df_raw, convert_none=True, logger=mod_logger): #convert the first two rows of a df into a dictionary
'builtin to_dict doesnt handle single entry dictionaries'
keys_list = []
values_list = []
if not len(df_raw.index) == 1:
logger.error('got unexpected shape of df')
raise IOError
for header, col in df_raw.items():
if header.startswith('Unnamed'):
if convert_none:
keys_list.append(None)
else:
logger.warning('found Unnamed header: \'%s\' but convert_none flag =False'%header)
else:
keys_list.append(header)
value = col[0]
values_list.append(value)
dictionary = dict(list(zip(keys_list, values_list))) #zip these into a dcit
return dictionary
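#===========================================================================
# illustrative usage sketch (not from the original module): df_to_1line_dict
# expects a single-row frame; headers become keys and the row becomes the values.
#===========================================================================
def _demo_df_to_1line_dict():
    df = pd.DataFrame([[1, 'a']], columns=['count', 'label'])
    d = df_to_1line_dict(df)
    print(d) #{'count': 1, 'label': 'a'}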
def convert_headers_frm_dict(df_raw, old_new_d, logger=mod_logger): #converts the passed header names to those in the dictionary and reorders
logger = logger.getChild('convert_headers_frm_dict')
df = df_raw.copy(deep=True)
new_headers = [] #headers from the passed dictionary
focus_headers = [] #renamed headers in the dataset
extra_headers = []
for header in df.columns:
if header in old_new_d: #convert me
conv_head = old_new_d[header]
if conv_head is None: raise IOError
if pd.isnull(conv_head): raise IOError
new_headers.append(conv_head)
focus_headers.append(conv_head)
logger.debug('converting \'%s\' to \'%s\''%(header, conv_head))
else: #no conversion here
extra_headers.append(header)
new_headers.append(header)
#apply the new headers
df.columns = new_headers
logger.debug('renamed %i headers: \n %s'%(len(focus_headers), focus_headers))
#===========================================================================
# reorder the columns
#===========================================================================
new_head_ordr = sorted(focus_headers) + extra_headers
df_ordr = df[new_head_ordr]
if not len(df_ordr.columns) == len(df_raw.columns):
logger.error('lost some columns')
raise IOError
logger.debug('reordered headers')
return df_ordr
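#===========================================================================
# illustrative usage sketch (not from the original module): renaming headers with
# convert_headers_frm_dict. Renamed columns are sorted to the front; columns not
# in the dictionary keep their name and are appended on the right.
#===========================================================================
def _demo_convert_headers_frm_dict():
    df_raw = pd.DataFrame({'old_a': [1], 'old_b': [2], 'keep': [3]})
    df = convert_headers_frm_dict(df_raw, {'old_a': 'a', 'old_b': 'b'})
    print(df.columns.tolist()) #['a', 'b', 'keep']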
def dict_to_ser(key_ser, dictionary): #generates a series from the dictionary keys
new_ser = pd.Series(index=key_ser.index, dtype = key_ser.dtype)
for index, entry in key_ser.items():
new_ser[index] = dictionary[entry]
return new_ser
def ser_fill_df(ser, rows, logger = mod_logger): #fill a dataframe with this series
"""
#===========================================================================
# INPUT
#===========================================================================
rows: count of row length for dummy df
"""
if not isser(ser): raise IOError
#buidl blank frame for writing
df = pd.DataFrame(index = list(range(0, rows)), columns = ser.index.values)
#loop through and fill
for index, row in df.iterrows(): df.iloc[index,:] = ser.values.tolist()
return df
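#===========================================================================
# illustrative usage sketch (not from the original module): ser_fill_df repeats
# one series down a dummy frame with the requested number of rows.
#===========================================================================
def _demo_ser_fill_df():
    ser = pd.Series({'scale': 1.5, 'tag': 'x'})
    df = ser_fill_df(ser, 3)
    print(df.shape) #(3, 2)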
def right_fill_df_atts(obj, att_names_list, df_raw, logger=mod_logger): #add a dummy block of attribute values to the df_raw
logger = logger.getChild('right_fill_df_atts')
#=======================================================================
#pull values from self
#=======================================================================
def_ser = pd.Series(index = att_names_list)
for att_name in att_names_list:
if not hasattr(obj, att_name):
logger.error('passed obj does not have an attribute \'%s\''%att_name)
raise IOError
att_value = getattr(obj, att_name) #get this attribute
def_ser[att_name] = att_value #fill it out
#fill the frame
dummy_df = ser_fill_df(def_ser, len(df_raw), logger = logger)
#append these
df_merge = pd.merge(df_raw, dummy_df, how = 'left', right_index=True, left_index=True)
if not len(df_merge) == len(df_raw): raise IOError
logger.debug('added %i new columns to make df_merge %s: %s'
%(len(att_names_list), str(df_merge.shape), att_names_list))
return df_merge
def right_fill_df_dict(right_dict, df_raw, logger=mod_logger): #add a dummy block of attribute values to the df_raw
logger = logger.getChild('right_fill_df_dict')
#=======================================================================
#pull values from self
#=======================================================================
def_ser = pd.Series(right_dict)
#fill the frame
dummy_df = ser_fill_df(def_ser, len(df_raw), logger = logger)
#append these
df_merge = pd.merge(df_raw, dummy_df, how = 'left', right_index=True, left_index=True)
if not len(df_merge) == len(df_raw): raise IOError
logger.debug('added %i new columns to make df_merge %s: %s'
%(len(right_dict), str(df_merge.shape), list(right_dict.keys())))
return df_merge
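#===========================================================================
# illustrative usage sketch (not from the original module): right_fill_df_dict
# appends a block of constant columns (from the dictionary) to the right of the
# passed frame.
#===========================================================================
def _demo_right_fill_df_dict():
    df_raw = pd.DataFrame({'val': [1, 2, 3]})
    df_merge = right_fill_df_dict({'run': 'r01', 'scale': 1.0}, df_raw)
    print(df_merge.columns.tolist()) #['val', 'run', 'scale']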
def concat_many_heads(df_raw, heads_list, concat_name = 'concat', sep = ' ', #combine many columns into one string
logger=mod_logger):
#=======================================================================
# concat the columns of interest
#=======================================================================
df1 = df_raw.copy(deep=True)
for index, col in enumerate(heads_list):
if index ==0:
df1[concat_name] = df_raw[col].map(str)
continue
ser = df_raw[col].map(str)
df1[concat_name] = df1[concat_name].str.cat(ser, sep= sep)
return df1
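#===========================================================================
# illustrative usage sketch (not from the original module): concat_many_heads
# joins several columns into one string column; are_dupes (below) uses it for
# paired duplicate checks.
#===========================================================================
def _demo_concat_many_heads():
    df_raw = pd.DataFrame({'first': ['a', 'b'], 'second': ['x', 'y']})
    df1 = concat_many_heads(df_raw, ['first', 'second'], concat_name='combo')
    print(df1['combo'].tolist()) #['a x', 'b y']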
class Head_translator(): #object for translating headers
def __init__(self, filepath, Parent):
self.Parent = Parent
self.logger = Parent.logger.getChild('tr')
self.filepath = filepath
self.name = 'Head_translator'
if not os.path.exists(filepath): raise IOError
self.load_dict(filepath)
self.logger.debug('initialized')
def load_dict(self, filepath, #load the tr_dict from file
header = 0, index_col = False, skip_blank_lines=True, skipinitialspace=True,
**kwargs):
"""
#=======================================================================
# INPUTS
#=======================================================================
tr_filepath: filepath to csv containing tr_dict
row1: header name found in raw data
row2: header name to map onto frame (new harmonized name)
BLANK or omitted = no translation done
tr_dict: header translation is performed with external translation dictionaries
key: raw header
value: new header
"""
logger = self.logger.getChild('load_dict')
logger.debug("loding dictionary from:\n %s"%filepath)
df_raw = pd.read_csv(filepath,
header = header,
index_col=index_col,
skip_blank_lines=skip_blank_lines,
skipinitialspace=skipinitialspace, **kwargs)
df = clean_dropna(df_raw, logger=self.logger)
self.tr_dict = df_to_1line_dict(df, convert_none=True, logger=self.logger)
return self.tr_dict
def translate(self, df_raw, expect_heads = None):
logger = self.logger.getChild('translate')
logger.debug('performing on df_raw %s'%str(df_raw.shape))
#=======================================================================
# #make the translation
#=======================================================================
df1 = convert_headers_frm_dict(df_raw, self.tr_dict, logger=logger)
return df1
#===============================================================================
# OUTPUTS --------------------------------------------------------------------
#===============================================================================
def write_to_file(filename, data, #write the df to a csv. intelligent
overwrite=False,
float_format=None,
index=False, #write the index?
logger=mod_logger, **kwargs ):
logger=logger.getChild('write_to_file')
# checks
if not isdf(data):
if not isser(data):
raise TypeError
#===========================================================================
# defaults
#===========================================================================
if not filename.endswith('.csv'): filename = filename + '.csv'
if overwrite == False: #don't overwrite
if os.path.isfile(filename): #Check whether file exists
logger.warning('File exists already: \n %s'%filename)
raise IOError
#=======================================================================
# root folder setup
#=======================================================================
head, tail = os.path.split(filename)
if not os.path.exists(head): os.makedirs(head) #make this directory
#===========================================================================
# writing
#===========================================================================
try:
data.to_csv(filename, float_format = float_format, index=index, **kwargs)
logger.info('df %s written to file: \n %s'%(str(data.shape), filename))
except:
logger.warning('WriteDF Failed for filename: \n %s'%filename)
logger.debug('df: \n %s'%data)
raise IOError
return
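#===========================================================================
# illustrative usage sketch (not from the original module): write_to_file appends
# '.csv' when the extension is missing and refuses to overwrite unless
# overwrite=True. The output path is an assumption.
#===========================================================================
def _demo_write_to_file():
    df = pd.DataFrame({'a': [1.23456, 2.0]})
    write_to_file('C:\\out\\results', df, overwrite=True, float_format='%.2f')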
def df_to_logger(df_raw, logger = mod_logger, row_print_count = 10): #log large dfs
if not isinstance(df_raw, pd.core.frame.DataFrame):
logger.error('got unexpected type for passed df: %s'%type(df_raw))
raise IOError
df = df_raw.copy(deep=True)
#change the display options
with pd.option_context('display.max_rows', None,
'display.max_columns', None,
'display.height',1000,
'display.width',1000):
logger.debug('\n %s \n \n \n \n'%df)
def write_dfset_excel(df_set_dict, filepath, #write a dictionary of frames to excel
engine='xlsxwriter', logger=mod_logger, **kwargs):
#===========================================================================
# setup defaults
#===========================================================================
logger=logger.getChild('write_dfset_excel')
if not filepath.endswith('.xls'): filepath = filepath + '.xls'
#===========================================================================
# make the root folder
#===========================================================================
head, tail = os.path.split(filepath)
if not os.path.exists(head): os.makedirs(head)
#===========================================================================
# data setup
#===========================================================================
"""NO! use the provided order
#sort the dictionary by key
od = OrderedDict(sorted(df_set_dict.items(), key=lambda t: t[0]))"""
#===========================================================================
# #write to multiple tabs
#===========================================================================
writer = pd.ExcelWriter(filepath, engine=engine)
for df_name, df in df_set_dict.items():
logger.debug("on \'%s\'"%df_name)
if not isdf(df):
if not isser(df):
logger.debug('got unexpected type on bundled data: \'%s\'. attempting to convert'%type(df))
try: df = pd.DataFrame([df])
except:
raise IOError
if len(df) == 0: continue #skip empty frames
try:
df.to_excel(writer, sheet_name=str(df_name), **kwargs)
except:
logger.error('failed to write df %s'%df_name)
raise IOError
writer.save()
logger.info('wrote %i frames/tab to: %s'%(len(list(df_set_dict.keys())), filepath))
def sort_workbook(filename): #sort xls by tab name
'not working'
import xlsxwriter
workbook = xlsxwriter.Workbook(filename)
# sort sheets based on name
workbook.worksheets_objs.sort(key=lambda x: x.name)
workbook.close()
def val_to_str(val): #intelligently generate a value string by type
if isinstance(val, float): return '%.4f'%val
if hasattr(val, 'shape'): return str(val.shape)
return str(val)
def write_fly_df( #write the first row of the df
filepath,
data,
lindex = None, #how to start this line in the file
first=False, #indicator for the first call
tag='',
db_f = False,
logger=mod_logger): #
"""
setup to handle series, df, or dxcol
"""
logger = logger.getChild('write_fly_df')
if len(data) == 0:
logger.warning('got empty data. skipping')
return
#===========================================================================
# defaults
#===========================================================================
if isinstance(data, pd.Series):
headers = data.index
data_ar = data.values
if lindex is None: lindex = data.name
elif isinstance(data, pd.DataFrame):
headers = data.columns
data_ar = data.iloc[0].values
if lindex is None: lindex = data.index[0]
else:
logger.error('got unexpected type for data %s'%type(data))
raise IOError
#===========================================================================
# prechecks
#===========================================================================
if db_f:
if not os.path.exists( os.path.dirname(filepath)):
raise IOError
#===========================================================================
# make writes
#===========================================================================
with open(filepath, 'a') as f: #re open and append to the file
#=======================================================================
# headers
#=======================================================================
if first:
logger.debug('first file write. setting up %i headers of type \'%s\''%(len(headers), type(headers)))
f.write('%s,'%tag)
#===============================================================
# normal 1Dcolumns
#===============================================================
if not isinstance(headers, pd.MultiIndex):
for k in headers:
#print k, v
f.write('%s,'%k)
f.write('\n') #start a new line
#===============================================================
# mdex columns
#===============================================================
else:
mdex = headers
first = True
for name in mdex.names: #loop through each level
l = mdex.get_level_values(name).values.tolist() #get all the values at this level
#lindex
if first: first = False
else:
'for the first line this slot is taken up by the session tag'
f.write('%s,'%name)
#write each of these
for k in l:
f.write('%s,'%k)
f.write('\n') #start a new line
logger.debug('stored mdex headers with %i levels: %s'%(mdex.nlevels, mdex.names))
#=======================================================================
# write the indexer
#=======================================================================
f.write('%s,'%lindex)
#=======================================================================
# write the values
#=======================================================================
for v in data_ar: #just taking the values from the first row. SHOULD ONLY HAVE 1 ROW!
f.write('%s,'%v) #write the values
f.write('\n') #start a new line
logger.debug('appended %i entries under \'%s\' to file %s'%
(len(data_ar), lindex, filepath))
return
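#===========================================================================
# illustrative usage sketch (not from the original module): appending a one-row
# result to a running csv with write_fly_df; pass first=True on the first call so
# the headers get written. The file path is an assumption and its folder is
# assumed to exist.
#===========================================================================
def _demo_write_fly_df():
    ser = pd.Series({'area': 12.5, 'count': 3}, name='run01')
    write_fly_df('C:\\out\\fly_results.csv', ser, first=True, tag='session1')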
# CHECKING ---------------------------------------------------------------------
def is_multi(obj):
if isinstance(obj, float): return False
#check if its numpy
if type(obj).__module__ == np.__name__: return True
#check if its pandas
if type(obj).__module__ == pd.__name__: return True
return False
def isdf(df, logger=mod_logger): #check if this is a df
if df is None: return False
if not isinstance(df, pd.core.frame.DataFrame):
#logger.debug('got undexpected type on passed obj: %s'%type(df))
return False
return True
def isser(ser, logger=mod_logger):
logger=logger.getChild('isser')
if not isinstance(ser, pd.core.series.Series):
#logger.error('got unexpected dtype: %s'%type(ser))
return False
else: return True
def ismdex(mdex, logger=mod_logger): #check if this is an mdex
logger = logger.getChild('ismdex')
if not isinstance(mdex, pd.core.indexes.multi.MultiIndex):
#logger.debug('got unexpected dtype: %s'%type(mdex))
return False
else: return True
def isdxcol(df, logger=mod_logger):
if isdf(df):
if ismdex(df.columns, logger=logger):
return True
logger.debug('passed df is not a dxcol')
return False
def isdxind(df, logger=mod_logger):
if isdf(df):
if ismdex(df.index, logger=logger):
return True
logger.debug('passed df is not a dxind')
return False
def smart_isdx(df, logger=mod_logger): #intelligent dx checking
logger = logger.getChild('smart_isdx')
#===========================================================================
# basic checks
#===========================================================================
if isdxcol(df, logger=logger): return True
if isdxind(df, logger=logger): return True
#check the index
boolidx = pd.isnull(df.index)
if boolidx.sum() > 0:
logger.warning('got nan entries on the index. may be a dx col')
return 'maybe'
#check if the index is an integer
"""
int_cnt = 0
for entry in df.index:
try:
_ = int(entry)
int_cnt = int_cnt +1
except: pass
if not int_cnt == len(df.index):
logger.warning('found mismatacfh on type count. may be a dx col')
return 'maybe'
"""
return False
def header_check(obj, expected_headers, return_list = False, logger = mod_logger): #check that all the expected headers are found in teh df
"""
#===========================================================================
# INPUTS
#===========================================================================
return_list: flag whether to return the list of headers not found in the data set (as second output)
expected_headers: list of headers to check against the passed dataset
"""
'todo: allow for index checking as well'
#===========================================================================
# build logger
#===========================================================================
logger = logger.getChild('header_check')
#===========================================================================
# check for data type and assign axis for checking
#===========================================================================
'takes either a series or a dataframe'
if isinstance(obj, pd.core.frame.DataFrame): #check for df
data_labels = list(obj.columns)
elif isinstance(obj, pd.core.series.Series): #check for series
data_labels = list(obj.index)
else:
logger.error('Expected type: df. found type: %s'%type(obj))
raise TypeError
#===========================================================================
# check for header match
#===========================================================================
flag = []
unfound_headers = []
for header in expected_headers: #loop through each header and see if it exists
if not header in data_labels: #check that each mandatory header is found in the inventory
msg = 'expected header: \'%s\' not found in headers: \n %s'%(header, data_labels)
flag.append(msg)
unfound_headers.append(header)
#===========================================================================
# print out all the flags
#===========================================================================
if len(flag) > 0:
for msg in flag: logger.warning(msg)
if return_list:
return False, unfound_headers
return False
else:
if return_list:
return True, unfound_headers
return True
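#===========================================================================
# illustrative usage sketch (not from the original module): header_check warns
# about each expected header missing from a frame (or series index) and can also
# return the missing list.
#===========================================================================
def _demo_header_check():
    df = pd.DataFrame(columns=['id', 'depth'])
    ok, missing = header_check(df, ['id', 'depth', 'name'], return_list=True)
    print(ok, missing) #False ['name']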
def get_entries_missing(ser, ve_list, logger=mod_logger): #get entries on ve_list not foundin df_raw
logger = logger.getChild('get_entries_missing')
if not isser(ser): raise IOError
ve_ser = pd.Series(ve_list)
excluded_ser = ve_ser[~ve_ser.isin(ser)]
return list(excluded_ser.values)
def are_dupes(df_raw, colname = None, logger=mod_logger, keep=False, #check if theer are any duplicates
**kwargs):
"""
colname = None: check for duplicates on all rows
colname = 'index': check for duplicates on the index
"""
logger = logger.getChild('are_dupes')
if not isinstance(colname, list): #normal single column check
if colname == 'index':
boolidx = df_raw.index.duplicated(keep=keep)
else:
boolidx = df_raw.duplicated(subset = colname, keep=keep) #identify every entry and its twin
else:
"""
Here we want to find duplicate PAIRS
this is different from identifying rows with values duplicated internally
we want coupled duplicates
"""
#merge the columsn to check on
chk_colnm = 'checker'
df1 = concat_many_heads(df_raw, colname, concat_name = chk_colnm, logger=logger)
#find where there are internal duplicates on this merged column
boolidx = df1.duplicated(subset = chk_colnm, keep=keep) #identify every entry and its twin
"""
df1.columns.tolist()
view_df(df1[boolidx])
df2 = df1[colname + [chk_colnm]]
df2.loc[:,'chk'] = boolidx.values
view_df(df2)
df3 = df2[boolidx]
view_df(df3)
"""
#===========================================================================
# closeout and report
#===========================================================================
if np.any(boolidx):
logger.debug('found %i (of %i) duplicates on \'%s\' '%(boolidx.sum(),len(df_raw), colname))
return True, boolidx
else:
return False, boolidx
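#===========================================================================
# illustrative usage sketch (not from the original module): are_dupes flags
# duplicate rows on a single column, or duplicate PAIRS when a list of columns
# is passed.
#===========================================================================
def _demo_are_dupes():
    df = pd.DataFrame({'name': ['a', 'a', 'b'], 'zone': [1, 1, 2]})
    found, boolidx = are_dupes(df, colname=['name', 'zone'])
    print(found, int(boolidx.sum())) #True 2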
def get_conflicts_bool(df_raw, header='head', logger=mod_logger):
logger = logger.getChild('get_conflicts_bool')
#get a unique frame
df_uq = df_raw.drop_duplicates(header)
#identify those entries that were dropped
boolidx = ~df_raw.index.isin(df_uq.index)
#get a frame of the dropped entries
df_nuq = df_raw[boolidx]
#get a list of the header values that were in the dropped entries
head_nuq_list = df_nuq[header].values
#identify all the entries that share these values
con_boolidx = df_raw[header].isin(head_nuq_list)
logger.debug('identified %i of %i conflict entries'%(con_boolidx.sum(), len(df_raw)))
return con_boolidx
def Get_bool_list(series, check, logger=mod_logger): #return bool of whether check is in value_list (of each cell)
'checking if list format entries have a match to the passed check (single or list)'
'see Series.isin for series to list checking'
bool_list = []
check = str(check)
#if not type(check) == type('a'): raise TypeError
for index, value in series.items():
try: #for values in list form
value_list = value.split(',')
value_list = list(map(str, value_list))
if check in value_list: bool_list.append(True)
else: bool_list.append(False)
except: #for integer values
if np.isnan(value): bool_list.append(True) #positive treatment of empty cells
elif str(value) == check: bool_list.append(True)
else: bool_list.append(False)
bool = np.array(bool_list)
return bool
def check_false_boolcol(df_raw, convert=True, logger=mod_logger): #trys to detect booleans
"""
#===========================================================================
# USEr
#===========================================================================
Here we're checking for improperly entered user information (incomplete bool columns)
make sure the whole raw frame is passed here (not a slice)
"""
if len(df_raw) < 2: return False, []
logger = logger.getChild('check_false_boolcol')
found = []
#===========================================================================
# initial cleaning
#===========================================================================
df1 = df_raw.dropna(axis='columns', how='all') #drop all the columns
'need to make sure the whole row isnt empty'
for colname, col in df1.items():
if col.dtype == np.bool:
continue
elif np.any(pd.isnull(col)): #must contain some null
logger.debug("on \'%s\' found %i nulls"%(colname, pd.isnull(col).sum()))
"""
df1[pd.isnull(col)]
"""
col1 = col.dropna()
if len(col1) == 0: continue #ignore columns with all nans
#find entries that look like missed booleans
bool1 = col1.values == 1.0
bool2 = col1.values == 0.0
bool3 = col1.values == 1
bool4 = col1.values == 0
#identify all non NAN entries that may be booleans
bool_cmb = np.logical_or(np.logical_or(bool1, bool2), np.logical_or(bool3, bool4))
#!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser(description="Convert literature estimates into T, log10L form.")
parser.add_argument("--config", default="config.yaml", help="The config file specifying everything we need.")
args = parser.parse_args()
# Likelihood functions to convert posteriors in weird formats into posteriors on temp, log10 Luminosity for a single star (of a potential binary).
import yaml
import numpy as np
from emcee import EnsembleSampler
f = open(args.config)
config = yaml.load(f)
f.close()
# Now, we need 4 possible lnprob functions, that take into account each of these possibilities.
ndim = 2
nwalkers = 10 * ndim
if "logT" in config:
logT, sigmalogT = config["logT"]
if "L" in config:
L, sigmaL = config["L"]
mu = np.array([logT, L])
Sigma = np.array([[sigmalogT**2, 0.0], [0.0, sigmaL**2]])
import logging
import numpy as np
import matplotlib as mlp
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.colors as mcolors
from scipy.spatial import cKDTree as KDTree
from scipy.spatial import distance
from matplotlib.collections import PatchCollection
import simtools.util.legend_handlers as legH
from simtools.model.model_parameters import CAMERA_ROTATE_ANGLE
from simtools.util.model import getCameraName, isTwoMirrorTelescope, getTelescopeClass
__all__ = ['Camera']
class Camera:
'''
Camera class, defining pixel layout including rotation, finding neighbour pixels,
calculating FoV and plotting the camera.
Methods
-------
readPixelList(cameraConfigFile)
Read the pixel layout from the camera config file,
assumed to be in a sim_telarray format.
getPixelDiameter()
Get pixel diameter.
getPixelShape()
Get pixel shape.
getLightguideEfficiencyAngleFileName()
Get the file name of the lightguide efficiency as a function of incidence angle.
getLightguideEfficiencyWavelengthFileName()
Get the file name of the lightguide efficiency as a function of wavelength.
calcFOV()
Calculate the FOV of the camera in degrees,
taking into account the focal length (preferably the effective focal length).
getNeighbourPixels(pixels)
Find adjacent neighbour pixels in cameras with hexagonal or square pixels.
Only directly adjacent neighbours are searched for, no diagonals.
getEdgePixels(pixels, neighbours)
Find the edge pixels of the camera.
plotPixelLayout()
Plot the pixel layout for an observer facing the camera.
Including in the plot edge pixels, off pixels, pixel ID for the first 50 pixels,
coordinate systems, FOV, focal length and the average edge radius.
'''
# Constants for finding neighbour pixels.
PMT_NEIGHBOR_RADIUS_FACTOR = 1.1
SIPM_NEIGHBOR_RADIUS_FACTOR = 1.4
SIPM_ROW_COLUMN_DIST_FACTOR = 0.2
def __init__(self, telescopeModelName, cameraConfigFile, focalLength):
'''
Camera class, defining pixel layout including rotation, finding neighbour pixels,
calculating FoV and plotting the camera.
Parameters
----------
telescopeModelName: string
As provided by the telescope model method TelescopeModel (ex South-LST-1).
cameraConfigFile: string
The sim_telarray file name.
focalLength: float
The focal length of the camera in (preferably the effective focal length),
assumed to be in the same unit as the pixel positions in the cameraConfigFile.
'''
self._logger = logging.getLogger(__name__)
self._telescopeModelName = telescopeModelName
self._cameraName = getCameraName(self._telescopeModelName)
self._cameraConfigFile = cameraConfigFile
self._focalLength = focalLength
if self._focalLength <= 0:
raise ValueError('The focal length must be larger than zero')
self._pixels = self.readPixelList(cameraConfigFile)
self._pixels = self._rotatePixels(self._pixels)
# Initialize an empty list of neighbours, to be calculated only when necessary.
self._neighbours = None
# Initialize an empty list of edge pixels, to be calculated only when necessary.
self._edgePixelIndices = None
return
def readPixelList(self, cameraConfigFile):
'''
Read the pixel layout from the camera config file, assumed to be in a sim_telarray format.
Parameters
----------
cameraConfigFile: string
The sim_telarray file name.
Returns
-------
dict: pixels
A dictionary with the pixel positions, the camera rotation angle,
the pixel shape, the pixel diameter, the pixel IDs and their "on" status.
Notes
-----
The pixel shape can be hexagonal (denoted as 1 or 3) or a square (denoted as 2).
The hexagonal shapes differ in their orientation, where those denoted as 3 are rotated
clockwise by 30 degrees with respect to those denoted as 1.
'''
datFile = open(cameraConfigFile, 'r')
pixels = dict()
pixels['pixel_diameter'] = 9999
pixels['pixel_shape'] = 9999
pixels['pixel_spacing'] = 9999
pixels['lightguide_efficiency_angle_file'] = 'none'
pixels['lightguide_efficiency_wavelength_file'] = 'none'
pixels['rotateAngle'] = 0 # The LST and MST-NectarCam cameras need to be rotated
pixels['x'] = list()
pixels['y'] = list()
pixels['pixID'] = list()
pixels['pixOn'] = list()
for line in datFile:
pixInfo = line.split()
if line.startswith('PixType'):
pixels['pixel_shape'] = int(pixInfo[5].strip())
pixels['pixel_diameter'] = float(pixInfo[6].strip())
pixels['lightguide_efficiency_angle_file'] = pixInfo[8].strip().replace('"', '')
if len(pixInfo) > 9:
pixels['lightguide_efficiency_wavelength_file'] = (
pixInfo[9].strip().replace('"', '')
)
if line.startswith('Rotate'):
pixels['rotateAngle'] = np.deg2rad(float(pixInfo[1].strip()))
if line.startswith('Pixel'):
pixels['x'].append(float(pixInfo[3].strip()))
pixels['y'].append(float(pixInfo[4].strip()))
pixels['pixID'].append(int(pixInfo[1].strip()))
if len(pixInfo) > 9:
if int(pixInfo[9].strip()) != 0:
pixels['pixOn'].append(True)
else:
pixels['pixOn'].append(False)
else:
pixels['pixOn'].append(True)
if pixels['pixel_diameter'] == 9999:
raise ValueError(
'Could not read the pixel diameter'
' from {} file'.format(cameraConfigFile)
)
if pixels['pixel_shape'] not in [1, 2, 3]:
raise ValueError(
'Pixel shape in {} unrecognized '
'(has to be 1, 2 or 3)'.format(cameraConfigFile)
)
return pixels
def _rotatePixels(self, pixels):
'''
Rotate the pixels according to the rotation angle given in pixels['rotateAngle'].
Additional rotation is added to get to the camera view of an observer facing the camera.
The angle for the axes rotation depends on the coordinate system in which the original
data was provided.
Parameters
----------
pixels: dictionary
The dictionary produced by the readPixelList method of this class
Returns
-------
pixels: dict
The pixels dictionary with rotated pixels.
The pixels orientation for plotting is added to the dictionary in pixels['orientation'].
The orientation is determined by the pixel shape (see readPixelList for details).
Notes
-----
The additional rotation angle to get to the camera view of an observer facing the camera
is saved in the const dictionary CAMERA_ROTATE_ANGLE.
In the case of dual mirror telescopes, the axis is flipped in order to keep the same
axis definition as for single mirror telescopes.
One can check if the telescope is a two mirror one with isTwoMirrorTelescope.
'''
if isTwoMirrorTelescope(self._telescopeModelName):
pixels['y'] = [(-1) * yVal for yVal in pixels['y']]
rotateAngle = pixels['rotateAngle'] # So not to change the original angle
rotateAngle += np.deg2rad(CAMERA_ROTATE_ANGLE[self._cameraName])
self._logger.debug('Rotating pixels by {}'.format(np.rad2deg(rotateAngle)))
if rotateAngle != 0:
for i_pix, xyPixPos in enumerate(zip(pixels['x'], pixels['y'])):
pixels['x'][i_pix] = (
xyPixPos[0] * np.cos(rotateAngle) - xyPixPos[1] * np.sin(rotateAngle)
)
pixels['y'][i_pix] = (
xyPixPos[0] * np.sin(rotateAngle) + xyPixPos[1] * np.cos(rotateAngle)
)
pixels['orientation'] = 0
if pixels['pixel_shape'] == 1 or pixels['pixel_shape'] == 3:
if pixels['pixel_shape'] == 3:
pixels['orientation'] = 30
if rotateAngle > 0:
pixels['orientation'] += np.rad2deg(rotateAngle)
return pixels
def getPixelDiameter(self):
'''
Get pixel diameter contained in _pixels
Returns
-------
diameter: float
'''
return self._pixels['pixel_diameter']
def getPixelShape(self):
'''
Get pixel shape code 1, 2 or 3, where 1 and 3 are hexagonal pixels,
where one is rotated by 30 degrees with respect to the other.
A square pixel is denoted as 2.
Returns
-------
pixel shape: int (1, 2 or 3)
'''
return self._pixels['pixel_shape']
def getLightguideEfficiencyAngleFileName(self):
'''
Get the file name of the lightguide efficiency as a function of incidence angle.
Returns
-------
str: file name of the lightguide efficiency as a function of incidence angle.
'''
return self._pixels['lightguide_efficiency_angle_file']
def getLightguideEfficiencyWavelengthFileName(self):
'''
Get the file name of the lightguide efficiency as a function of wavelength.
Returns
-------
str: file name of the lightguide efficiency as a function of wavelength.
'''
return self._pixels['lightguide_efficiency_wavelength_file']
def getCameraFillFactor(self):
'''
Calculate the fill factor of the camera, defined as (pixel_diameter/pixel_spacing)**2
Returns
-------
float: the camera fill factor
'''
if self._pixels['pixel_spacing'] == 9999:
points = np.array([self._pixels['x'], self._pixels['y']]).T
pixelDistances = distance.cdist(points, points, 'euclidean')
self._pixels['pixel_spacing'] = np.min(pixelDistances[pixelDistances > 0])
return (self._pixels['pixel_diameter'] / self._pixels['pixel_spacing'])**2
def calcFOV(self):
'''
Calculate the FOV of the camera in degrees, taking into account the focal length.
Returns
-------
fov: float
The FOV of the camera in the degrees.
averageEdgeDistance: float
The average edge distance of the camera
Notes
-----
The x,y pixel positions and focal length are assumed to have the same unit (usually cm)
'''
self._logger.debug('Calculating the FoV')
return self._calcFOV(
self._pixels['x'],
self._pixels['y'],
self.getEdgePixels(),
self._focalLength
)
def _calcFOV(self, xPixel, yPixel, edgePixelIndices, focalLength):
'''
Calculate the FOV of the camera in degrees, taking into account the focal length.
Parameters
----------
xPixel: list
List of positions of the pixels on the x-axis
yPixel: list
List of positions of the pixels on the y-axis
edgePixelIndices: list
List of indices of the edge pixels
focalLength: float
The focal length of the camera in (preferably the effective focal length),
assumed to be in the same unit as the pixel positions.
Returns
-------
fov: float
The FOV of the camera in the degrees.
averageEdgeDistance: float
The average edge distance of the camera
Notes
-----
The x,y pixel positions and focal length are assumed to have the same unit (usually cm)
'''
self._logger.debug('Calculating the FoV')
averageEdgeDistance = 0
for i_pix in edgePixelIndices:
averageEdgeDistance += np.sqrt(xPixel[i_pix] ** 2 + yPixel[i_pix] ** 2)
averageEdgeDistance /= len(edgePixelIndices)
fov = 2 * np.rad2deg(np.arctan(averageEdgeDistance / focalLength))
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''
This file generates binary files.
'''
from __future__ import division
import os
import numpy as np
from src.dataset import CreateDatasetCoco
def search_dir(save_path):
'''
search_dir
'''
if not os.path.exists(save_path):
os.makedirs(save_path)
bin_path = save_path+"images"
if not os.path.exists(bin_path):
os.makedirs(bin_path)
def data_to_bin(val_dataset, save_path):
'''
data_to_bin
'''
i = 0
centers = []
scales = []
scores = []
ids = []
for item in val_dataset.create_dict_iterator():
inputs = item['image'].asnumpy()
inputs_flipped = inputs[:, :, :, ::-1]
c = item['center'].asnumpy()
s = item['scale'].asnumpy()
score = item['score'].asnumpy()
d = item['id'].asnumpy()
inputs.tofile(save_path+"images//"+str(i)+".bin")
inputs_flipped.tofile(save_path+"images//flipped"+str(i)+".bin")
centers.append(c.astype(np.float32))
scales.append(s.astype(np.float32))
scores.append(score.astype(np.float32))
ids.append(d.astype(np.float32))
i = i+1
np.save(os.path.join(save_path, "centers.npy"), np.array(centers, dtype=np.float32))
import math
import json
import torch
import numpy as np
from pathlib import Path
from scipy import stats
from quince.library import utils
from quince.library import models
from quince.library import datasets
from quince.library import plotting
def compute_intervals_ensemble(trial_dir, mc_samples):
config_path = trial_dir / "config.json"
with config_path.open(mode="r") as cp:
config = json.load(cp)
dataset_name = config.get("dataset_name")
ds_train = datasets.DATASETS.get(dataset_name)(**config.get("ds_train"))
outcome_ensemble = build_ensemble(
config=config, experiment_dir=trial_dir, ds=ds_train
)
ds_test = datasets.DATASETS.get(dataset_name)(**config.get("ds_test"))
_ = get_intervals(
dataset=ds_test,
outcome_ensemble=outcome_ensemble,
mc_samples_y=mc_samples,
file_path=trial_dir / "intervals.json",
)
def compute_intervals_kernel(trial_dir):
config_path = trial_dir / "config.json"
with config_path.open(mode="r") as cp:
config = json.load(cp)
dataset_name = config.get("dataset_name")
ds_train = datasets.DATASETS.get(dataset_name)(**config.get("ds_train"))
outcome_ensemble = build_ensemble(
config=config, experiment_dir=trial_dir, ds=ds_train
)
ds_test = datasets.DATASETS.get(dataset_name)(**config.get("ds_test"))
kr = models.KernelRegressor(
dataset=ds_train,
initial_length_scale=1.0,
feature_extractor=outcome_ensemble[0].encoder.encoder
if config["dataset_name"] == "hcmnist"
else None,
propensity_model=outcome_ensemble[0],
verbose=False,
)
ds_valid = datasets.DATASETS.get(dataset_name)(**config.get("ds_valid"))
kr.fit_length_scale(ds_valid, grid=np.arange(0.1, 3.0, 0.002))
_ = get_intervals_kernel(
dataset=ds_test, model=kr, file_path=trial_dir / "intervals_kernels.json",
)
def print_summary(experiment_dir, kernel):
summary = {"policy_risk": {"risk": {"true": []}, "error": {},}}
for k in GAMMAS.keys():
summary["policy_risk"]["risk"].update({k: []})
summary["policy_risk"]["error"].update({k: []})
for trial_dir in sorted(experiment_dir.iterdir()):
if "trial-" not in str(trial_dir):
continue
config_path = trial_dir / "config.json"
with config_path.open(mode="r") as cp:
config = json.load(cp)
dataset_name = config.get("dataset_name")
ds_test = datasets.DATASETS.get(dataset_name)(**config.get("ds_test"))
tau_true = torch.tensor(ds_test.mu1 - ds_test.mu0).to("cpu")
pi_true = (
tau_true >= 0.0 if config["dataset_name"] == "ihdp" else tau_true < 0.0
)
intervals = load_intervals(
file_path=trial_dir / "intervals_kernels.json"
if kernel
else trial_dir / "intervals.json",
)
update_summaries(
summary=summary,
dataset=ds_test,
intervals=intervals,
pi_true=pi_true.numpy().astype("float32"),
epistemic_uncertainty=False,
lt=False if config["dataset_name"] == "ihdp" else True,
)
for k, v in summary["policy_risk"]["risk"].items():
se = stats.sem(v)
h = se * stats.t.ppf((1 + 0.95) / 2.0, 20 - 1)
print(k, np.mean(v), h)
print("")
for k, v in summary["policy_risk"]["error"].items():
se = stats.sem(v)
h = se * stats.t.ppf((1 + 0.95) / 2.0, 20 - 1)
print(k, np.mean(v), h)
print("")
def paired_t_test(experiment_dir):
summary = {"policy_risk": {"risk": {"true": []}, "error": {},}}
summary_kernel = {"policy_risk": {"risk": {"true": []}, "error": {},}}
for k in GAMMAS.keys():
summary["policy_risk"]["risk"].update({k: []})
summary["policy_risk"]["error"].update({k: []})
summary_kernel["policy_risk"]["risk"].update({k: []})
summary_kernel["policy_risk"]["error"].update({k: []})
for trial_dir in sorted(experiment_dir.iterdir()):
if "trial-" not in str(trial_dir):
continue
config_path = trial_dir / "config.json"
with config_path.open(mode="r") as cp:
config = json.load(cp)
dataset_name = config.get("dataset_name")
ds_test = datasets.DATASETS.get(dataset_name)(**config.get("ds_test"))
tau_true = torch.tensor(ds_test.mu1 - ds_test.mu0).to("cpu")
pi_true = (
tau_true >= 0.0 if config["dataset_name"] == "ihdp" else tau_true < 0.0
)
intervals = load_intervals(file_path=trial_dir / "intervals.json",)
intervals_kernel = load_intervals(
file_path=trial_dir / "intervals_kernels.json"
)
update_summaries(
summary=summary,
dataset=ds_test,
intervals=intervals,
pi_true=pi_true.numpy().astype("float32"),
epistemic_uncertainty=False,
lt=False if config["dataset_name"] == "ihdp" else True,
)
update_summaries(
summary=summary_kernel,
dataset=ds_test,
intervals=intervals_kernel,
pi_true=pi_true.numpy().astype("float32"),
epistemic_uncertainty=False,
lt=False if config["dataset_name"] == "ihdp" else True,
)
print("Paired t-test")
for k in summary["policy_risk"]["risk"].keys():
v_density = np.asarray(summary["policy_risk"]["risk"][k])
v_kernel = np.asarray(summary_kernel["policy_risk"]["risk"][k])
print('====================================================================================================')
print('== NCS Problem 1. If you run the script below, the elements of x will have been changed to the elements of out.'
' How can you keep the elements of x from being changed to the elements of out when you test it again?')
print('====================================================================================================\n')
import copy
import numpy as np
x = np.array([[1.0, -0.5], [-2.0, 3.0]])
print(x)
mask = (x <= 0)
print(mask)
out = x.copy()
print(out)
out[mask] = 0
print(out)
print(x)
# ■ Chapter 5 contents
# 1. What is backpropagation?
# 2. Computation graphs
#    - addition graph
#    - multiplication graph
#    - backprop through the addition graph
#    - backprop through the multiplication graph
# 3. Implementing simple layers in Python
#    - addition layer (with backprop)
#    - multiplication layer (with backprop)
#    - Relu layer (with backprop)
#    - sigmoid layer (with backprop)
# 4. Implementing the Affine and softmax layers
# 5. Implementing the batched Affine layer
# 6. The softmax-with-loss layer
# 7. Implementing learning with backpropagation
# ■ What is backpropagation?
#
# "Error backpropagation" is a way to efficiently compute the gradient of the function
# being minimized during neural network training.
#
# Ways to compute the gradient (slope) of a function
# 1. numerical differentiation <--- far too slow
# 2. error backpropagation     <--- fast and simple
#
# * forward pass vs backward pass
#   - forward pass:  input layer -> hidden layer -> output layer
#   - backward pass: output layer -> hidden layer -> input layer
#     the error is propagated backwards.
#
# Working backwards from the output layer, the error at every node in each layer can be
# computed. Once each node's error is known, it can be used to compute the gradient of
# the function.
# "In other words, the propagated error is used to adjust the weights."
#                    ↓
#           error backpropagation
# ■ Computation graphs
#
# "A way to draw the forward- and backward-pass computations as a graph"
#
# The advantage of a computation graph? It allows local computation.
# Local computation means that, regardless of what happens in the rest of the graph,
# each node can produce its next output from only the information connected to it.
#
# Figure fig 5-4
#
# ■ Why solve it with a computation graph?
#   No matter how complex the whole problem is, each node only has to focus on a simple
#   computation, which simplifies the problem.
#
# ■ The biggest practical reason for using computation graphs?
#   Derivatives can be computed efficiently via backpropagation.
#                    ↓
#   We want to know how much the 'amount paid' increases when the apple price rises
#   'just a little'.
#   => take the partial derivative of the amount paid with respect to the apple price
#                    ↓
#   If the apple price rises by 1 won, the final amount rises by 2.2 won.
print('====================================================================================================')
print('== Problem 100. Instantiate the multiplication class built above and compute the total price of the apples below.')
print('====================================================================================================\n')
apple = 200
apple_num = 5
tax = 1.2
class MulLayer:
def __init__(self):
self.x = None
self.y = None
def forward(self, x, y):
self.x = x
self.y = y
out = x * y
return x * y
def backward(self, dout):
dx = dout * self.y
dy = dout * self.x
return dx, dy
apple_layer = MulLayer()
tax_layer = MulLayer()
apple_price = apple_layer.forward(apple, apple_num)
price = tax_layer.forward(apple_price, tax)
price
print('====================================================================================================')
print('== Problem 101. Implement the addition layer in Python!')
print('====================================================================================================\n')
class AddLayer:
def __init__(self):
pass
def forward(self, x, y):
return x + y
def backward(self, dout):
dx = dout
dy = dout
return dx, dy
print('====================================================================================================')
print('== Problem 102. Compute the total price when buying 2 apples and 5 mandarins!')
print('====================================================================================================\n')
apple_node = MulLayer()
apple_price = apple_node.forward(200, 2)
orange_node = MulLayer()
orange_price = orange_node.forward(300, 5)
fruit_node = AddLayer()
fruit_price = fruit_node.forward(apple_price, orange_price)
total_node = MulLayer()
total_price = total_node.forward(fruit_price, 1.5)
print(total_price)
print('====================================================================================================')
print('== Problem 106. Implement the backward pass of problem 105 in Python.')
print('====================================================================================================\n')
mul_apple_layer = MulLayer()
mul_mandarin_layer = MulLayer()
mul_pear_layer = MulLayer()
add_apple_mandarin_layer = AddLayer()
add_all_layer = AddLayer()
mul_tax_layer = MulLayer()
## forward pass
apple_price = mul_apple_layer.forward(apple, apple_cnt)
mandarin_price = mul_mandarin_layer.forward(mandarin, mandarin_cnt)
pear_price = mul_pear_layer.forward(pear, pear_cnt)
apple_mandarin_price = add_apple_mandarin_layer.forward(apple_price, mandarin_price)
all_price = add_all_layer.forward(apple_mandarin_price, pear_price)
price = mul_tax_layer.forward(all_price, tax)
## backward pass
d_price = 1
d_all_price, d_tax = mul_tax_layer.backward(d_price) #step 6
d_apple_mandarin_price, d_pear_price = add_all_layer.backward(d_all_price) #step 5
d_apple_price, d_mandarin_price = add_apple_mandarin_layer.backward(d_apple_mandarin_price) #step 4
d_apple, d_apple_cnt = mul_apple_layer.backward(d_apple_price) #step 1
d_mandarin, d_mandarin_cnt = mul_mandarin_layer.backward(d_mandarin_price) #step 2
d_pear, d_pear_cnt = mul_pear_layer.backward(d_pear_price) #step 3
print(price)
print(d_apple, d_apple_cnt, d_mandarin, d_mandarin_cnt, d_pear, d_pear_cnt)
# ■ Basic syntax you need to know before building the ReLU function
import copy
import numpy as np
x = np.array([[1.0, -0.5], [-2.0, 3.0]])
print(x)
mask = (x <= 0)
print(mask)
out = x.copy()
print(out)
out[mask] = 0
print(out)
print(x)
print('====================================================================================================')
print('== Problem 107. Implement the ReLU function in Python!')
print('====================================================================================================\n')
class Relu:
def __init__(self):
self.mask = None
def forward(self, x):
self.mask = x <= 0
out = x.copy()
out[self.mask] = 0
return out
def backward(self, dout):
dout[self.mask] = 0
return dout
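# illustrative sketch (not part of the original tutorial): the chapter outline
# above also lists a sigmoid layer with backprop; a minimal version in the same
# style as the Relu class could look like this.
class Sigmoid:
    def __init__(self):
        self.out = None
    def forward(self, x):
        out = 1 / (1 + np.exp(-x))
        self.out = out #keep the forward output for the backward pass
        return out
    def backward(self, dout):
        #the derivative of sigmoid is y * (1 - y), using the stored forward output y
        dx = dout * (1.0 - self.out) * self.out
        return dx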
print('====================================================================================================')
print('== Problem 108. Create the x variable below and check what is printed when x is passed to the forward function of the Relu object.')
print('====================================================================================================\n')
x = np.array([1.0, 5.0, -2.0, 3.0])
relu = Relu()
print(relu.forward(x))
import numpy as np
x = np.array([5, 6])
w = np.array([[2, 4, 4], [6, 3, 5]])
print(np.dot(x, w))
print('====================================================================================================')
print('== Problem 121. Create a function named forward that computes the forward pass of problem 120!')
print('====================================================================================================\n')
x = np.array([1, 2])
w = np.array([[1, 3, 5], [2, 4, 6]])
b = np.array([1, 2, 3])
def forward(x, w, b):
return np.dot(x, w) + b
print(forward(x, w, b))
print('====================================================================================================')
print('== Problem 122. Create a function named backward that computes the backward pass of problem 121!')
print('====================================================================================================\n')
out = np.array([6, 13, 20], ndmin=2)
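# A minimal sketch of the requested backward pass (assumed solution; the original
# answer is cut off here). For y = x @ w + b the gradients are dx = dout @ w.T,
# dw = x.T @ dout and db = sum(dout) over the batch axis, with `out` reused as dout.
def backward(dout, x, w):
    x_2d = np.atleast_2d(x)
    dx = np.dot(dout, w.T)
    dw = np.dot(x_2d.T, dout)
    db = np.sum(dout, axis=0)
    return dx, dw, db
print(backward(out, x, w))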
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 17 17:09:03 2019
@author: duttar
"""
import numpy as np
import math
from scipy.integrate import quad
from scipy.optimize import leastsq
from scipy.sparse import lil_matrix
import sys
sys.path.append('additional_scripts/geompars/')
sys.path.append('additional_scripts/greens/')
from Gorkhamakemesh import *
from greenfunction import *
from collections import namedtuple
def calc_moment(trired, p, q, r, slipall):
'''
calculates the moment magnitude for the non-planar fault
'''
N = trired.shape[0]
moment = np.array([])
for i in range(N):
ind1 = trired[i,:]
ind = ind1.astype(int)
x = p[ind]
y = q[ind]
z = r[ind]
ons = np.array([1,1,1])
xymat = np.vstack((x,y,ons))
yzmat = np.vstack((y,z,ons))
zxmat = np.vstack((z,x,ons))
detxy = np.linalg.det(xymat)
detyz = np.linalg.det(yzmat)
detzx = np.linalg.det(zxmat)
A = 0.5*np.sqrt(detxy**2+detyz**2+detzx**2)
Area = A*1e6
slip1dip = slipall[i]
slip2strike = slipall[N+i]
slip = np.abs(slip1dip) + np.abs(slip2strike)
moment = np.append(moment,3e10*Area*slip)
tot_mom = moment.sum(axis=0)
momentmag = 2*math.log10(tot_mom)/3 - 6.03
return momentmag
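# A minimal self-check sketch (not from the original script): a single right triangle
# with 1 km legs in the x-y plane, 1 m of dip slip and 0.5 m of strike slip. All
# vertex coordinates and slip values below are made-up illustration numbers.
if __name__ == '__main__':
    _trired = np.array([[0, 1, 2]])
    _p = np.array([0.0, 1.0, 0.0])   # x coordinates (km)
    _q = np.array([0.0, 0.0, 1.0])   # y coordinates (km)
    _r = np.array([0.0, 0.0, 0.0])   # depth (km)
    _slipall = np.array([1.0, 0.5])  # [dip slip, strike slip] in metres
    print(calc_moment(_trired, _p, _q, _r, _slipall))  # moment magnitude of ~4.9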
def laplacian(trired, p, q, r):
'''
Laplacian for triangular dislocation elements for either strike-slip or dip-slip
Inputs: trired - indices for the fault with TDEs
p,q,r - parameters for the location of TDEs
Outputs: laplac
'''
npat = trired.shape[0]
laplac = lil_matrix((npat,npat))
for i in range(1,npat+1):
# 3 corners of ith patch
indi1 = trired[i-1,:]
indi = indi1.astype(int)
centr_i = np.array([np.mean(p[indi]),np.mean(q[indi]),np.mean(r[indi])])
# now find the 3 triangles sharing the edges
# 1st edge is in following patches
firedge,trash = np.where(trired == indi[0])
# 2nd edge is in following patches
secedge,trash = np.where(trired == indi[1])
# 3rd edge is in following patches:
thiedge,trash = np.where(trired == indi[2])
# find the triangle sharing 1st and 2nd corners
comm12 = np.intersect1d(firedge,secedge)
indkeep = np.where(comm12!=i-1)
tri12 = comm12[indkeep]
        # find the triangle sharing the 2nd and 3rd corners
comm23 = np.intersect1d(secedge,thiedge)
indkeep = np.where(comm23!=i-1)
tri23 = comm23[indkeep]
        # find the triangle sharing the 3rd and 1st corners
comm31 = np.intersect1d(firedge,thiedge)
indkeep = np.where(comm31!=i-1)
tri31 = comm31[indkeep]
tris = np.array([tri12,tri23,tri31])
tris = np.array([item for item in tris if item.size])
numtris = tris.size
if numtris == 3:
# center of 1st triangle:
indvert1 = trired[tris[0],:]
indvert = indvert1.astype(int)
centr_x = np.mean(p[indvert],axis=1)
centr_y = np.mean(q[indvert],axis=1)
centr_z = np.mean(r[indvert],axis=1)
centr_fir = np.array([centr_x,centr_y,centr_z])
distri1 = np.sqrt((centr_fir[0]-centr_i[0])**2 + (centr_fir[1]-centr_i[1])**2 + \
(centr_fir[2]-centr_i[2])**2)
# center of 2nd triangle
indvert1 = trired[tris[1],:]
indvert = indvert1.astype(int)
centr_x = np.mean(p[indvert],axis=1)
            centr_y = np.mean(q[indvert],axis=1)
import codecs
import json
import os
import pickle
from typing import Any, Dict, List
from itertools import chain
from math import log
from ipdb import set_trace as bp
import numpy as np
import torch
from allennlp.data import Vocabulary
from scipy import sparse
PROJ_DIR = "/data/zeyuliu2/neural_persona"
EPSILON = 1e-12
def create_trainable_BatchNorm1d(num_features: int,
weight_learnable: bool = False,
bias_learnable: bool = True,
momentum: float = 0.001,
eps: float = 0.001,
affine: bool = True):
"""
:param num_features: C from an expected input of size (N,C,L) or L from input of size (N,L)
    :param weight_learnable: True if you want gamma (the scale) to be learnable
    :param bias_learnable: True if you want beta (the shift) to be learnable
:param momentum: the value used for the running_mean and running_var computation.
Can be set to None for cumulative moving average (i.e. simple average)
:param eps: a value added to the denominator for numerical stability.
:param affine: a boolean value that when set to True, this module has learnable affine parameters.
:return:
"""
bn = torch.nn.BatchNorm1d(num_features, eps=eps, momentum=momentum, affine=affine)
if not weight_learnable:
bn.weight.data.copy_(torch.ones(num_features))
bn.weight.requires_grad = weight_learnable
# bias is initialized to be all zero
bn.bias.requires_grad = bias_learnable
return bn
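# A minimal usage sketch (illustrative only; the feature count and batch size are
# arbitrary): gamma stays frozen at 1 while beta remains trainable.
if __name__ == "__main__":
    _bn = create_trainable_BatchNorm1d(num_features=16)
    _out = _bn(torch.randn(8, 16))
    assert not _bn.weight.requires_grad and _bn.bias.requires_grad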
def normal_kl(N0, N1, eps=EPSILON):
"""
(Roughly) A pragmatic translation of:
https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence#Multivariate_normal_distributions
Note:
- N0 and N1 are assumed to be of diagonal covariance matrix
- N0 and N1 are of the same dimension
- N0 and N1 are batch-first
:param N0:
:param N1:
:return:
"""
mu_0, log_var_0 = N0
var_0 = log_var_0.exp()
mu_1, log_var_1 = N1
if len(mu_0.size()) == 3:
_, _, k = mu_0.size()
else:
_, k = mu_0.size()
var_1 = log_var_1.exp()
d = mu_1 - mu_0
tmp_0 = log_var_0.sum(dim=-1)
tmp_0[tmp_0 == 0] = eps
tmp_1 = log_var_1.sum(dim=-1)
first_term = torch.sum(var_0 / var_1, dim=-1)
second_term = torch.sum(d ** 2 / var_1, dim=-1)
result = 0.5 * (first_term + second_term - k + tmp_1 - tmp_0)
return result
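# A quick sanity-check sketch (illustrative, not part of the module): the KL divergence
# between two identical diagonal Gaussians should be numerically zero. The batch size
# of 4 and dimension of 8 below are arbitrary.
if __name__ == "__main__":
    _mu = torch.zeros(4, 8)
    _log_var = torch.zeros(4, 8)
    print(normal_kl((_mu, _log_var), (_mu, _log_var)))  # ~0 for every batch element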
def compute_background_log_frequency(vocab: Vocabulary, vocab_namespace: str, precomputed_bg_file=None):
"""
Load in the word counts from the JSON file and compute the
background log term frequency w.r.t this vocabulary.
"""
# precomputed_word_counts = json.load(open(precomputed_word_counts, "r"))
# bp()
# sample a probability tensor from a symmetric dirichlet
log_term_frequency = torch.distributions.dirichlet.Dirichlet(torch.ones(vocab.get_vocab_size(vocab_namespace))).sample()
if precomputed_bg_file is not None:
with open(precomputed_bg_file, "r") as file_:
precomputed_bg = json.load(file_)
else:
precomputed_bg = vocab._retained_counter.get(vocab_namespace) # pylint: disable=protected-access
if precomputed_bg is None:
return log_term_frequency
# bp()
for i in range(vocab.get_vocab_size(vocab_namespace)):
token = vocab.get_token_from_index(i, vocab_namespace)
if token in precomputed_bg:
log_term_frequency[i] = precomputed_bg[token]
elif token in ("@@UNKNOWN@@", "@@PADDING@@", '@@START@@', '@@END@@') or token not in precomputed_bg:
log_term_frequency[i] = 1e-12
# bp()
assert log_term_frequency.sum().allclose(torch.ones(1))
log_term_frequency = torch.log(log_term_frequency)
# return torch.zeros(vocab.get_vocab_size(vocab_namespace))
return log_term_frequency
def log_standard_categorical(logits: torch.Tensor):
"""
Calculates the cross entropy between a (one-hot) categorical vector
and a standard (uniform) categorical distribution.
:param p: one-hot categorical distribution
:return: H(p, u)
Originally from https://github.com/wohlert/semi-supervised-pytorch.
"""
# Uniform prior over y
prior = torch.softmax(torch.ones_like(logits), dim=1)
prior.requires_grad = False
cross_entropy = -torch.sum(logits * torch.log(prior + 1e-8), dim=1)
return cross_entropy
def separate_labeled_unlabeled_instances(text: torch.LongTensor,
classifier_text: torch.Tensor,
label: torch.LongTensor,
metadata: List[Dict[str, Any]]):
"""
    Given a batch of examples, separate them into labeled and unlabeled instances.
"""
labeled_instances = {}
unlabeled_instances = {}
is_labeled = [int(md['is_labeled']) for md in metadata]
    is_labeled = np.array(is_labeled)
import numpy as np
from sklearn.metrics import roc_curve, precision_recall_curve, auc
def calc_block_idx(x_min, x_max, y_min, y_max, h_step, w_step, mode):
all_blocks = list()
center = np.array([(y_min + y_max) / 2, (x_min + x_max) / 2])
all_blocks.append(center + center)
if mode > 1:
all_blocks.append(np.array([y_min, center[1]]) + center)
all_blocks.append(np.array([y_max, center[1]]) + center)
all_blocks.append(np.array([center[0], x_min]) + center)
        all_blocks.append(np.array([center[0], x_max]) + center)
import os
import json
import cvxpy as cp
import networkx as nx
import numpy as np
import torch
import torch.nn as nn
from utils.metrics import RMSE
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from models.driving.FADNet import DrivingNet
from loaders.driving import get_iterator_driving
from loaders.complex_driving import get_iterator_complex_driving
import functools
import operator
EXTENSIONS = {"driving": ".npz"}
# Model size in bit
MODEL_SIZE_DICT = {"driving": 358116680}
# Model computation time in ms
COMPUTATION_TIME_DICT = {"driving": 30.2}
# Tags list
TAGS = ["Train/Loss", "Train/Acc", "Test/Loss", "Test/Acc", "Consensus"]
def args_to_string(args):
"""
Transform experiment's arguments into a string
:param args:
:return: string
"""
args_string = ""
args_to_show = ["experiment", "network_name", "fit_by_epoch", "bz_train",
"lr", "decay", "local_steps"]
for arg in args_to_show:
args_string += arg
args_string += "_" + str(getattr(args, arg)) + "_"
return args_string[:-1]
def get_optimal_mixing_matrix(adjacency_matrix, method="FDLA"):
"""
:param adjacency_matrix: np.array()
:param method:method to construct the mixing matrix weights;
possible are:
        FMMC (Fast Mixing Markov Chain), see https://web.stanford.edu/~boyd/papers/pdf/fmmc.pdf
FDLA (Fast Distributed Linear Averaging), https://web.stanford.edu/~boyd/papers/pdf/fastavg.pdf
:return: optimal mixing matrix as np.array()
"""
network_mask = 1 - adjacency_matrix
N = adjacency_matrix.shape[0]
s = cp.Variable()
W = cp.Variable((N, N))
objective = cp.Minimize(s)
if method == "FDLA":
constraints = [W == W.T,
W @ np.ones((N, 1)) == np.ones((N, 1)),
cp.multiply(W, network_mask) == np.zeros((N, N)),
-s * np.eye(N) << W - (np.ones((N, 1)) @ np.ones((N, 1)).T) / N,
W - (np.ones((N, 1)) @ np.ones((N, 1)).T) / N << s * np.eye(N)
]
elif method == "FMMC":
constraints = [W == W.T,
W @ np.ones((N, 1)) == np.ones((N, 1)),
cp.multiply(W, network_mask) == np.zeros((N, N)),
                       -s * np.eye(N) << W - (np.ones((N, 1)) @ np.ones((N, 1)).T) / N,
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 31 16:59:43 2021
@author: timur
"""
import random
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
def generate_models(x, y, degs):
models = []
for deg in degs:
model = np.polyfit(x,y,deg)
models.append(model.tolist())
return models
def compare_values(y1, y2):
y = np.array(y1)
estimated = np.array(y2)
mean_error = (((estimated - y)**2).sum())/len(y)
return 1 - (mean_error/np.var(y))
def compare(y, estimated):
mean_error = (((estimated - y)**2).sum())/len(y)
return 1 - (mean_error/np.var(y))
def l_predict(model, x):
results = np.polyval(model, x)
return results.tolist()
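# A short worked sketch (illustrative values): fit degree-1 and degree-2 models to a
# noiseless quadratic and compare their R^2 scores with compare_values. The quadratic
# fit should score ~1.0, the linear fit noticeably less.
if __name__ == "__main__":
    xs = list(range(10))
    ys = [3 * v ** 2 + 2 * v + 1 for v in xs]
    for m in generate_models(xs, ys, [1, 2]):
        print('degree', len(m) - 1, 'R^2', compare_values(ys, l_predict(m, xs)))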
def splitData(xVals, yVals, n):
toTrain = random.sample(range(len(xVals)),
len(xVals)//n)
trainX, trainY, testX, testY = [],[],[],[]
for i in range(len(xVals)):
if i in toTrain:
trainX.append(xVals[i])
trainY.append(yVals[i])
else:
testX.append(xVals[i])
testY.append(yVals[i])
return trainX, trainY, testX, testY
def cross_validating(x,y, degrees, subsets, n):
rSquares = {}
xVals = np.array(x)
yVals = np.array(y)
for f in range(subsets):
trainX, trainY, testX, testY = splitData(xVals, yVals,n)
for d in degrees:
model = np.polyfit(trainX, trainY, d)
            estYVals = np.polyval(model, testX)
import os
import tensorflow as tf
import numpy as np
import rejection_network
class RejectionSystem():
def __init__(self):
self.dir_path = os.path.dirname(os.path.abspath(__file__))
self._train_dir = os.path.join(self.dir_path, "Data/Train/")
self._valid_dir = os.path.join(self.dir_path, "Data/Valid/")
# training setting
self._training_epoches = 100
self._number_of_minibatches = 20
self._rejection_net = rejection_network.Network()
self._initialize_training = True
self._debug = False
def load_data(self):
train_images = np.load(self._train_dir + "train_images.npy")
train_targets = np.load(self._train_dir + "train_targets.npy")
valid_images = np.load(self._valid_dir + "valid_images.npy")
        valid_targets = np.load(self._valid_dir + "valid_targets.npy")
"""
Python sample script for cross-wavelet analysis and the statistical approach
suggested by Torrence and Compo (1998) using the wavelet module. To run
this script successfully, the `matplotlib` and `progressbar` modules have to
be installed.
Disclaimer
----------
This module is based on routines provided by <NAME> and <NAME>
available at <http://paos.colorado.edu/research/wavelets/>, on routines
provided by <NAME>, <NAME> and <NAME> available at
<http://noc.ac.uk/using-science/crosswavelet-wavelet-coherence>, and
on routines provided by <NAME> available at
<http://cell.biophys.msu.ru/static/swan/>.
This software is released under a BSD-style open source license. Please read
the license file for furter information. This routine is provided as is
without any express or implied warranties whatsoever.
Authors
-------
<NAME>, <NAME>
"""
# Kindly modified by:
# <NAME> (UEA/EST)
# <EMAIL>
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import matplotlib.pyplot as plt
import pycwt as wavelet
from pycwt.helpers import find
from matplotlib.image import NonUniformImage
##--- input time series
data1 = dict(name='Arctic Oscillation',
nick='AO', file='jao.txt')
data2 = dict(name='Baltic Sea ice extent',
nick='BMI', file='jbaltic.txt')
# Loads the data to be analysed.
t1, s1 = np.loadtxt(data1['file'], unpack=True)
t2, s2 = np.loadtxt(data2['file'], unpack=True)
dt = np.diff(t1)[0]
n1 = t1.size
n2 = t2.size
n = min(n1, n2)
# Change the probablity density function (PDF) of the data. The time series
# of Baltic Sea ice extent is highly bi-modal and we therefore transform the
# timeseries into a series of percentiles. The transformed series probably
# reacts 'more linearly' to climate.
s2, _, _ = wavelet.helpers.boxpdf(s2)
# Calculates the standard deviation of each time series for later
# normalization.
std1 = s1.std()
std2 = s2.std()
# I. Continuous wavelet transform
# ===============================
# Calculate the CWT of both normalized time series. The function wavelet.cwt
# returns a list containing the [wave, scales, freqs, coi, fft, fftfreqs]
# variables.
mother = wavelet.Morlet(6) # Morlet mother wavelet with m=6
slevel = 0.95 # Significance level
dj = 1/12 # Twelve sub-octaves per octaves
s0 = 2 * dt # Starting scale, here 6 months
J = 6 / dj # Seven powers of two with dj sub-octaves
if True:
alpha1, _, _ = wavelet.ar1(s1) # Lag-1 autocorrelation for red noise
alpha2, _, _ = wavelet.ar1(s2) # Lag-1 autocorrelation for red noise
else:
alpha1 = alpha2 = 0.0 # Lag-1 autocorrelation for white noise
# The following routines perform the wavelet transform and siginificance
# analysis for two data sets.
mother = 'morlet'
W1, scales1, freqs1, coi1, _, _ = wavelet.cwt(s1/std1, dt, dj, s0, J, mother)
signif1, fft_theor1 = wavelet.significance(1.0, dt, scales1, 0, alpha1,
significance_level=slevel,
wavelet=mother)
W2, scales2, freqs2, coi2, _, _ = wavelet.cwt(s2/std2, dt, dj, s0, J, mother)
signif2, fft_theor2 = wavelet.significance(1.0, dt, scales2, 0, alpha2,
significance_level=slevel,
wavelet=mother)
power1 = (np.abs(W1)) ** 2  # wavelet power spectrum
import pdb
import numpy as np
import scipy as sp
import scipy.optimize as op
import util
import matplotlib.pyplot as plt
import time
# Laplace Inference -----------------------------------------------------------
def negLogPosteriorUnNorm(xbar, ybar, C_big, d_big, K_bigInv, xdim, ydim):
xbar = np.ndarray.flatten(np.asarray(xbar))
ybar = np.ndarray.flatten(np.asarray(ybar))
T = int(len(d_big)/ydim)
C_big = np.asarray(C_big)
d_big = np.asarray(d_big)
K_bigInv = np.asarray(K_bigInv)
A = np.dot(C_big.T, xbar) + d_big
Aexp = np.exp(A)
L1 = np.dot(Aexp, np.ones(ydim*T))
L2 = - np.dot(ybar, A.T)
L3 = 0.5*np.dot(xbar,np.dot(K_bigInv,xbar))
L = L1 + L2 + L3
# pdb.set_trace()
return L
def negLogPosteriorUnNorm_grad(xbar, ybar, C_big, d_big, K_bigInv, xdim, ydim):
xbar = np.asarray(xbar)
ybar = np.asarray(ybar)
A = np.dot(C_big.T, xbar) + d_big
A = np.float64(A)
Aexp = np.exp(A)
dL1 = np.dot(Aexp,C_big.T)
dL2 = - np.dot(ybar, C_big.T)
dL3 = np.dot(xbar, K_bigInv)
dL = dL1 + dL2 + dL3
return dL
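# A small finite-difference sketch (made-up shapes, not from the original code): compare
# negLogPosteriorUnNorm_grad against a central-difference gradient of negLogPosteriorUnNorm.
if __name__ == "__main__":
    np.random.seed(0)
    xdim, ydim, T = 2, 3, 4
    xbar0 = np.random.randn(xdim * T)
    ybar0 = np.abs(np.random.randn(ydim * T))
    C0 = 0.1 * np.random.randn(xdim * T, ydim * T)
    d0 = 0.1 * np.random.randn(ydim * T)
    K0inv = np.eye(xdim * T)
    g_analytic = negLogPosteriorUnNorm_grad(xbar0, ybar0, C0, d0, K0inv, xdim, ydim)
    eps = 1e-6
    g_numeric = np.zeros_like(xbar0)
    for i in range(xbar0.size):
        e = np.zeros_like(xbar0)
        e[i] = eps
        g_numeric[i] = (negLogPosteriorUnNorm(xbar0 + e, ybar0, C0, d0, K0inv, xdim, ydim) -
                        negLogPosteriorUnNorm(xbar0 - e, ybar0, C0, d0, K0inv, xdim, ydim)) / (2 * eps)
    print(np.max(np.abs(g_analytic - g_numeric)))  # should be tiny (~1e-7 or less)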
def negLogPosteriorUnNorm_hess(xbar, ybar, C_big, d_big, K_bigInv, xdim, ydim):
xbar = np.asarray(xbar)
ybar = np.asarray(ybar)
T = int(len(xbar)/xdim)
A = np.dot(C_big.T, xbar) + d_big
A = np.float64(A)
    Aexp = np.exp(A)
import numpy as np
from os.path import join
from joblib import Parallel, delayed
from struct import calcsize, unpack
import pandas as pd
template_types = {0: 'Noise', 1: 'SS', 2: 'SS_Contaminated', 3: 'SS_Putative', 4: 'MUA', 5: 'Unspesified_1',
6: 'Unspecified_2', 7: 'Unspecified_3'}
def load_extracellular_data_cube(data_cube_filename,
cube_type,
shape_of_spike_trig_avg):
cut_extracellular_data = np.memmap(data_cube_filename,
dtype=cube_type,
mode='r',
shape=shape_of_spike_trig_avg)
return cut_extracellular_data
def _read_unpack(fmt, fh):
return unpack(fmt, fh.read(calcsize(fmt)))
def load_tsne_result(files_dir, filename='result.dat'):
# Read and pass on the results
with open(join(files_dir, filename), 'rb') as output_file:
# The first two integers are the number of samples and the dimensionality
result_samples, result_dims = _read_unpack('ii', output_file)
# Collect the results, but they may be out of order
results = [_read_unpack('{}d'.format(result_dims), output_file) for _ in range(result_samples)]
return np.array(results)
def generate_spike_info(base_folder, files_dir):
spikes_used = np.load(join(files_dir, 'indices_of_spikes_used.npy'))
template_marking = np.load(join(base_folder, 'template_marking.npy'))
spike_templates = np.load(join(base_folder, 'spike_templates.npy'))[spikes_used]
spike_times = np.load(join(base_folder, 'spike_times.npy'))[spikes_used]
indices_of_small_templates = np.load(join(files_dir, 'indices_of_small_templates.npy'))
tsne = load_tsne_result(join(base_folder, files_dir))
columns = ['original_index', 'times', 'template_after_cleaning', 'type_after_cleaning', 'template_after_sorting',
'type_after_sorting', 'template_with_all_spikes_present', 'tsne_x', 'tsne_y', 'probe_position_x',
'probe_position_z']
spike_info = pd.DataFrame(index=np.arange(spikes_used.size), columns=columns)
spike_info['original_index'] = spikes_used
spike_info['times'] = spike_times
spike_info['template_after_cleaning'] = spike_templates
spike_info['type_after_cleaning'] = [template_types[int(template_marking[i])] for i in spike_templates]
spike_info['template_after_sorting'] = spike_info['template_after_cleaning']
spike_info['type_after_sorting'] = spike_info['type_after_cleaning']
spike_info['template_with_all_spikes_present'] = [bool(np.in1d(spike_template, indices_of_small_templates))
for spike_template in spike_templates]
spike_info['tsne_x'] = tsne[:, 0]
spike_info['tsne_y'] = tsne[:, 1]
spike_info.to_pickle(join(files_dir, 'spike_info.df'))
return spike_info
def generate_average_over_selected_spikes_multiprocess(base_folder,
binary_data_filename,
number_of_channels_in_binary_file,
spike_times,
cube_type,
cut_time_points_around_spike=100,
num_of_points_for_baseline=None):
channel_map = np.load(join(base_folder, 'channel_map.npy'))
used_electrodes = np.squeeze(channel_map, axis=1)
num_of_channels = used_electrodes.size
num_of_points_in_spike_trig = cut_time_points_around_spike * 2
data_raw = np.memmap(binary_data_filename, dtype=np.int16, mode='r')
number_of_timepoints_in_raw = int(data_raw.shape[0] / number_of_channels_in_binary_file)
raw_extracellular_data = np.reshape(data_raw, (number_of_channels_in_binary_file, number_of_timepoints_in_raw),
order='F')
unordered_data = Parallel(n_jobs=8)(delayed(_avg_of_eigth_of_selected_spikes)(i,
spike_times,
used_electrodes,
num_of_points_in_spike_trig,
cube_type,
raw_extracellular_data,
num_of_points_for_baseline)
                                        for i in np.arange(8))
import logging
import os
import joblib
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
from sklearn.preprocessing import StandardScaler
from nas_bench_x11.utils import utils
from nas_bench_x11.surrogate_model import SurrogateModel
class VAE(nn.Module):
def __init__(self, input_shape, hidden_dim, num_enc_layers, num_dec_layers, z_dim, dropout_p):
super().__init__()
self.enc_in_layer = nn.Linear(input_shape[1], hidden_dim)
        # wrapped in nn.ModuleList so the layers register as submodules and their
        # parameters are picked up by vae.parameters()
        self.enc_fc_layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_enc_layers)])
        self.enc_bn_layers = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(num_enc_layers)])
        self.enc_drop_layers = nn.ModuleList([nn.Dropout(dropout_p) for _ in range(num_enc_layers)])
self.enc_out_layer = nn.Linear(hidden_dim, z_dim*2)
self.dec_in_layer = nn.Linear(z_dim, hidden_dim)
        self.dec_fc_layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_dec_layers)])
        self.dec_bn_layers = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(num_dec_layers)])
        self.dec_drop_layers = nn.ModuleList([nn.Dropout(dropout_p) for _ in range(num_dec_layers)])
self.dec_out_layer = nn.Linear(hidden_dim, input_shape[1])
self.LeakyReLU = nn.LeakyReLU(0.2)
def reparameterize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = torch.randn(*mu.size())
z = mu + std * eps
return z
def encode(self, tensor):
enc_tensor = self.enc_in_layer(tensor)
for fc, bn, drop in zip(self.enc_fc_layers, self.enc_bn_layers, self.enc_drop_layers):
enc_tensor = self.LeakyReLU(fc(enc_tensor))
enc_tensor = drop(bn(enc_tensor))
mu, logvar = torch.chunk(self.enc_out_layer(enc_tensor), 2, dim=1)
return mu, logvar
def decode(self, tensor):
dec_tensor = self.dec_in_layer(tensor)
for fc, bn, drop in zip(self.dec_fc_layers, self.dec_bn_layers, self.dec_drop_layers):
dec_tensor = F.relu(fc(dec_tensor))
dec_tensor = drop(dec_tensor)
return self.dec_out_layer(dec_tensor)
def forward(self, x):
mu, logvar = self.encode(x)
z = self.reparameterize(mu, logvar)
output = self.decode(z)
return output, mu, logvar
class NNSurrogateModel(nn.Module):
def __init__(self, input_shape, hidden_dim, num_layers, out_dim, dropout_p):
super().__init__()
self.inlayer = nn.Linear(input_shape[1], hidden_dim)
        # nn.ModuleList (rather than a plain list) so these layers are registered
        self.fclayers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers)])
        self.bnlayers = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(num_layers)])
        self.droplayers = nn.ModuleList([nn.Dropout(dropout_p) for _ in range(num_layers)])
self.outlayer = nn.Linear(hidden_dim, out_dim)
def forward(self, x):
x = F.relu(self.inlayer(x))
for fc, bn, drop in zip(self.fclayers, self.bnlayers, self.droplayers):
x = F.relu(fc(x))
x = drop(x)
return self.outlayer(x)
class VAENNModel(SurrogateModel):
def __init__(self, data_root, log_dir, seed, model_config, data_config, search_space, nb101_api):
super().__init__(data_root, log_dir, seed, model_config, data_config, search_space, nb101_api)
self.model = None
self.model_config["param:objective"] = "reg:squarederror"
self.model_config["param:eval_metric"] = "rmse"
def parse_param_config(self):
identifier = "param:"
param_config = dict()
for key, val in self.model_config.items():
if key.startswith(identifier):
param_config[key.replace(identifier, "")] = val
return param_config
def train(self):
X_train, y_train, _ = self.load_dataset(dataset_type='train', use_full_lc=True)
X_val, y_val, _ = self.load_dataset(dataset_type='val', use_full_lc=True)
X_train, y_train = torch.Tensor(X_train), torch.Tensor(y_train)
param_config = self.parse_param_config()
param_config["seed"] = self.seed
self.num_components = param_config["num_components"]
self.vae = VAE(y_train.shape, param_config["vae_hidden_dim"],
param_config["enc_num_layers"], param_config["dec_num_layers"],
self.num_components, param_config["vae_dropout_p"])
vae_optimizer = torch.optim.Adam(self.vae.parameters(), lr=param_config["vae_learning_rate"])
def vae_loss_fn(x, recon_x, mu, logvar):
MSE = nn.MSELoss(reduction='sum')(recon_x, x)
KLD = -0.5 * torch.sum(1 + logvar - mu**2 - logvar.exp())
return MSE + KLD
self.model = NNSurrogateModel(X_train.shape, param_config["nn_hidden_dim"], param_config["nn_num_layers"], self.num_components, param_config["nn_dropout_p"])
nn_criterion = nn.MSELoss()
nn_optimizer = optim.Adam(self.model.parameters(), lr=param_config["nn_learning_rate"])
self.ss = StandardScaler()
pX, py = torch.tensor(X_train, dtype=torch.float32), torch.tensor(self.ss.fit_transform(y_train), dtype=torch.float32)
train_dataset = TensorDataset(pX, py)
train_dataloader = DataLoader(train_dataset, batch_size=32)
self.vae.train()
num_epochs = param_config["vae_num_epochs"]
for epoch in range(num_epochs):
running_loss = 0.0
for i, (_, y) in enumerate(train_dataloader, 0):
vae_optimizer.zero_grad()
outputs = self.vae(y)
loss = vae_loss_fn(y, *outputs)
loss.backward()
vae_optimizer.step()
running_loss += loss.item()
if not i % 500:
print(f'VAE [{epoch+1}, {i+1}] loss: {running_loss / 1000:.3f}')
running_loss = 0.0
self.vae.eval()
self.model.train()
num_epochs = param_config["nn_num_epochs"]
for epoch in range(num_epochs):
running_loss = 0.0
for i, (X, y) in enumerate(train_dataloader, 0):
nn_optimizer.zero_grad()
with torch.no_grad():
_, z, _ = self.vae(y)
loss = nn_criterion(self.model(X), z)
loss.backward()
nn_optimizer.step()
running_loss += loss.item()
if not i % 500:
print(f'NN [{epoch+1}, {i+1}] loss: {running_loss / 1000:.3f}')
running_loss = 0.0
train_pred, val_pred = self.eval(X_train), self.eval(X_val)
# metrics for final prediction
train_pred_final = np.array(train_pred)
        val_pred_final = np.array(val_pred)
import numpy as np
import torch
import glob
import os
import pickle
import argparse
from torch.utils.data import DataLoader
from torch.utils.data.dataset import (TensorDataset,
ConcatDataset)
from i2i.cyclegan import CycleGAN
from util import (convert_to_rgb,
H5Dataset,
DatasetFromFolder)
from torchvision import transforms
from skimage.io import imsave, imread
from skimage.transform import rescale, resize
from importlib import import_module
def get_face_swap_iterators(bs):
"""DepthNet + GT <-> frontal GT faces"""
filename_vgg = "data/vgg/vgg.h5"
filename_celeba = "data/celeba/celebA.h5"
filename_celeba_swap = "data/celeba_faceswap/celeba_faceswap.h5"
a_train = H5Dataset(filename_celeba_swap, 'imgs', train=True)
vgg_side_train = H5Dataset('%s' % filename_vgg, 'src_GT', train=True)
vgg_frontal_train = H5Dataset('%s' % filename_vgg, 'tg_GT', train=True)
celeba_side_train = H5Dataset('%s' % filename_celeba, 'src_GT', train=True)
celeba_frontal_train = H5Dataset('%s' % filename_celeba, 'tg_GT', train=True)
b_train = ConcatDataset((vgg_side_train,
vgg_frontal_train,
celeba_side_train,
celeba_frontal_train))
a_valid = H5Dataset(filename_celeba_swap, 'imgs', train=False)
vgg_side_valid = H5Dataset('%s' % filename_vgg, 'src_GT', train=False)
vgg_frontal_valid = H5Dataset('%s' % filename_vgg, 'tg_GT', train=False)
celeba_side_valid = H5Dataset('%s' % filename_celeba, 'src_GT', train=False)
celeba_frontal_valid = H5Dataset('%s' % filename_celeba, 'tg_GT', train=False)
b_valid = ConcatDataset((vgg_side_valid,
vgg_frontal_valid,
celeba_side_valid,
celeba_frontal_valid))
loader_train_a = DataLoader(a_train, batch_size=bs, shuffle=True)
loader_train_b = DataLoader(b_train, batch_size=bs, shuffle=True)
loader_valid_a = DataLoader(a_valid, batch_size=bs, shuffle=True)
loader_valid_b = DataLoader(b_valid, batch_size=bs, shuffle=True)
return loader_train_a, loader_train_b, loader_valid_a, loader_valid_b
def image_dump_handler(out_folder, scale_factor=1.):
def _fn(losses, inputs, outputs, kwargs):
if kwargs['iter'] != 1:
return
A_real = inputs[0].data.cpu().numpy()
B_real = inputs[1].data.cpu().numpy()
atob, atob_btoa, btoa, btoa_atob = \
[elem.data.cpu().numpy() for elem in outputs.values()]
outs_np = [A_real, atob, atob_btoa, B_real, btoa, btoa_atob]
# determine # of channels
n_channels = outs_np[0].shape[1]
w, h = outs_np[0].shape[-1], outs_np[0].shape[-2]
# possible that A_real.bs != B_real.bs
        bs = np.min([outs_np[0].shape[0], outs_np[3].shape[0]])
"""
@Author: <NAME>
@Since: 12/24/2021 3:12 PM
"""
import copy
import json
import streamlit as st
import pandas as pd
import base64
import numpy as np
import PIL.Image as Image
import requests
import io
from NCFModel import NCFModel
from imdb_request import get_film_info, get_title_id
model = NCFModel()
st.set_page_config(
page_title="Group 8 - NCF",
)
st.title('Group 8')
st.markdown("""
* **Deployed by: <NAME> - 19127368**
\n**Other members:**
* 19127353 - Lê Tấn Đạt
* 19127429 - Trần Tuấn Kha
* 19127651 - Trần Anh Túc
""")
st.title('Recommender System')
st.title('Neural Collaborative Filtering')
st.markdown("""
This project uses the MovieLens 100K dataset!
* **Python libraries:** base64, pandas, streamlit, numpy, ...
* **Paper Source:** [paperswithcode.com](https://paperswithcode.com/paper/neural-collaborative-filtering)
* **Model reference:** [github.com/microsoft](https://github.com/microsoft/recommenders)
* **Source Code: ** [github.com/MinhDuc](https://github.com/steveho29/NCFApp)
""")
st.sidebar.header('Filter Dataset')
def filedownload(df, name):
csv = df.to_csv(index=False)
b64 = base64.b64encode(csv.encode()).decode() # strings <-> bytes conversions
href = f'<a href="data:file/csv;base64,{b64}" download="{name}.csv">Download CSV File</a>'
return href
@st.cache
def load_train_data():
train_data = copy.deepcopy(model.train_data)
# train_data.sort_values(by=['userID', 'itemID'], ascending=[True, True])
item_data = copy.deepcopy(model.item_data)
return item_data, train_data
def getImage(url):
return Image.open(io.BytesIO(requests.get(url).content))
item_data, train_data = load_train_data()
# genre_data = sorted(train_data['genre'].unique())
genre_data = model.genre_columns
# Sidebar - Genre selection
selected_genre = st.sidebar.multiselect('Movie Genres', genre_data, genre_data)
# Sidebar - Rate selection
rating = [i for i in range(1, 6)]
selected_rating = st.sidebar.multiselect('Ratings', rating, rating)
# Filtering data
df_train_data_selected = train_data[(train_data.genre.isin(selected_genre)) & (train_data.rating.isin(selected_rating))]
st.header('Dataset Movielens 100k Ratings')
st.write('Collected from 943 Users - 1682 Movies')
st.write('Data Dimension: ' + str(df_train_data_selected.shape[0]) + ' rows and ' + str(
df_train_data_selected.shape[1]) + ' columns.')
des = train_data.describe().drop(columns=['userID', 'itemID'])[:2]
st.dataframe(df_train_data_selected)
# -------------- DOWNLOAD BUTTON -----------------
st.markdown(filedownload(df_train_data_selected, "Group8 - MovieLens 100k Dataset.csv"), unsafe_allow_html=True)
st.write('Description: ')
st.dataframe(des)
genre_bar_chart = [(len(train_data[train_data['genre'] == genre]))for genre in genre_data]
genre_bar_chart = pd.DataFrame(np.array(genre_bar_chart).reshape(1, len(genre_data)), columns=genre_data)
st.bar_chart(genre_bar_chart)
# -------------- TRAINING LOSS LINE CHART-----------------
st.header('Training Loss 100 Epochs')
neumf_error = np.load('Error_neumf.npy')
gmf_error = np.load('Error_gmf.npy')
mlp_error = np.load("Error_mlp.npy")
mlp_sigmoid_error = np.load("Error_mlp_sigmoid.npy")
st.subheader("MLP Model with ReLU activation function")
st.write('MLP with ReLU shows an oddly flat training-loss curve (it is a straight line)')
st.line_chart(pd.DataFrame(np.array([mlp_error]).T))  # original call is cut off here; transposed so epochs form the x-axis
import os
os.system("rm ks_cpp.so")
import numpy as np
from KS_Sampling import ks_sampling, ks_sampling_mem
np.set_printoptions(precision=6, linewidth=120, suppress=True)
np.random.seed(0)
if __name__ == '__main__':
# -- Example 1 -- 5000 data points, feature vector length 100
n_sample = 5000
n_feature = 100
    X = np.random.randn(n_sample, n_feature)
import cv2
import numpy as np
from keras.models import load_model
from statistics import mode
from utils.datasets import get_labels
# from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input
import os
import matplotlib.pyplot as plt
from mtcnn import MTCNN
import time
# USE_WEBCAM = True # If false, loads video file source
detector = MTCNN(min_face_size=10)
# parameters for loading data and images
emotion_model_path = './models/emotion_model.hdf5'
emotion_labels = get_labels('fer2013')
# hyper-parameters for bounding boxes shape
frame_window = 25
emotion_offsets = (20, 40)
# loading models
face_cascade = cv2.CascadeClassifier('./models/haarcascade_frontalface_default.xml')
emotion_classifier = load_model(emotion_model_path)
# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]  # (height, width) expected by the emotion classifier
print(emotion_target_size)
# starting lists for calculating modes
emotion_window = []
# starting video streaming
#cv2.namedWindow('window_frame')
#video_capture = cv2.VideoCapture(0)
'''
# Select video or webcam feed
cap = None
if (USE_WEBCAM == True):
cap = cv2.VideoCapture(0) # Webcam source
else:
cap = cv2.VideoCapture('./demo/dinner.mp4') # Video file source
'''
def get_namefile_detail():
osname = '' #os.getcwd()
list_emotion = os.listdir('/Volumes/NO NAME/emotions/jaffedbase')#'EmotionSet')#'/Volumes/NO NAME/FERG_DB_256/aia')
if '._.DS_Store' in list_emotion:
list_emotion.remove('._.DS_Store')
'''
osname+='/Volumes/NO NAME/emotions/jaffedbase'#'EmotionSet'#'/Volumes/NO NAME/FERG_DB_256/aia'
osname_const = osname
count = 0
list_emotion_images_namefile = []
for emotion in list_emotion:
osname += '/' +emotion
list_images = os.listdir(osname)
if '.DS_Store' in list_images:
list_images.remove('.DS_Store')
count+=len(list_images)
# for f in list_images :
# list_emotion_images_namefile.append(osname+'/'+f)
# osname = osname_const
'''
#result_real = list_emotion_images_namefile[0].split('/')[1]
print(list_emotion)
return list_emotion
#return list_emotion_images_namefile
happy = 0
neutral = 0
sad = 0
temp = 0
time_ = []
class TestScale:
global time_
global neutral,happy,sad
global temp
def __init__(self,namefile):
self.namefile = namefile
def emotion_detection(self):
global time_
global temp
frame = cv2.imread(self.namefile)
#print(frame)
gray_image = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
rgb_image = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
#MTCNN
faces = detector.detect_faces(frame)
faces = [face['box'] for face in faces]
faces = faces
print([faces])
# exit()
''' HAARCASCADE
faces = face_cascade.detectMultiScale(frame, scaleFactor=1.1,minNeighbors=1, minSize=(30,30),flags = cv2.CASCADE_SCALE_IMAGE)
#print(faces)
'''
for face_coordinates in faces: #if 1 :
x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
gray_face = gray_image[y1:y2, x1:x2]
cv2.imshow('f',gray_face)
# print(gray_face)
try:
gray_face = cv2.resize(gray_face, (emotion_target_size)) #interpolation : Linear
except:
#pass
continue
# print(gray_face)
# print(gray_face.shape)
b=time.time()
gray_face = preprocess_input(gray_face, True)
# print(gray_face)
gray_face = np.expand_dims(gray_face, 0) #axis = 0
# print(gray_face.shape)
            gray_face = np.expand_dims(gray_face, -1)
import os
from torch.utils.data import TensorDataset, DataLoader, Dataset, Sampler
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from tqdm import tqdm
import scanpy as sc
class Setting:
"""Parameters for training"""
def __init__(self):
self.epoch = 300
self.lr = 0.0005
class Net(nn.Module):
def __init__(self, feature_num):
super(Net, self).__init__()
self.layer_1 = nn.Linear(feature_num, 500)
self.layer_2 = nn.Linear(500, 20)
def forward(self, x):
x = F.relu(self.layer_1(x))
x = self.layer_2(x)
return x
class CellDataset(Dataset):
def __init__(self, hdf_file, root_dir):
self.data_frame = pd.read_hdf(root_dir + hdf_file)
def __len__(self):
return len(self.data_frame)
def __getitem__(self, idx):
train_x = self.data_frame.iloc[idx, :-1]
train_x = torch.tensor(train_x, dtype=torch.float32)
return train_x
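# A minimal loading sketch; the HDF file name and directory below are hypothetical,
# purely to illustrate how CellDataset plugs into a DataLoader.
if __name__ == "__main__":
    _cells = CellDataset(hdf_file="cells.h5", root_dir="./data/")
    _loader = DataLoader(_cells, batch_size=32, shuffle=True)
    for _batch in _loader:
        print(_batch.shape)  # (batch_size, n_features)
        break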
class NPairSampler(Sampler):
def __init__(self, labels):
self.labels = labels
def generate_npairs(self, labels):
if isinstance(labels, torch.Tensor):
labels = labels.detach().cpu().numpy()
        label_set, count = np.unique(labels, return_counts=True)
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of pelicun.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# <NAME>
"""
This subpackage performs system tests on the control module of pelicun.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import truncnorm as tnorm
from copy import deepcopy
import os, sys, inspect
current_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0,os.path.dirname(parent_dir))
from pelicun.control import *
from pelicun.uq import mvn_orthotope_density as mvn_od
from pelicun.tests.test_pelicun import prob_allclose, prob_approx
# -----------------------------------------------------------------------------
# FEMA_P58_Assessment
# -----------------------------------------------------------------------------
def test_FEMA_P58_Assessment_central_tendencies():
"""
Perform a loss assessment with customized inputs that reduce the
dispersion of calculation parameters to negligible levels. This allows us
to test the results against pre-defined reference values in spite of the
randomness involved in the calculations.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())[0]
assert RV_EDP.theta[0] == pytest.approx(0.5 * g)
assert RV_EDP.theta[1] == pytest.approx(0.5 * g * 1e-6, abs=1e-7)
assert RV_EDP._distribution == 'lognormal'
# QNT
assert A._QNT_dict is None
#RV_QNT = A._RV_dict['QNT']
#assert RV_QNT is None
# FRG
RV_FRG = list(A._FF_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_FRG]).T
assert_allclose(thetas, np.array([0.444, 0.6, 0.984]) * g, rtol=0.01)
assert_allclose(betas, np.array([0.3, 0.4, 0.5]), rtol=0.01)
rho = RV_FRG[0].RV_set.Rho()
assert_allclose(rho, np.ones((3, 3)), rtol=0.01)
assert np.all([rv.distribution == 'lognormal' for rv in RV_FRG])
# RED
RV_RED = list(A._DV_RED_dict.values())
mus, sigmas = np.array([rv.theta for rv in RV_RED]).T
assert_allclose(mus, np.ones(2), rtol=0.01)
assert_allclose(sigmas, np.array([1e-4, 1e-4]), rtol=0.01)
rho = RV_RED[0].RV_set.Rho()
assert_allclose(rho, np.array([[1, 0], [0, 1]]), rtol=0.01)
assert np.all([rv.distribution == 'normal' for rv in RV_RED])
assert_allclose (RV_RED[0].truncation_limits, [0., 2.], rtol=0.01)
assert_allclose (RV_RED[1].truncation_limits, [0., 4.], rtol=0.01)
# INJ
RV_INJ = list(A._DV_INJ_dict.values())
mus, sigmas = np.array([rv.theta for rv in RV_INJ]).T
assert_allclose(mus, np.ones(4), rtol=0.01)
assert_allclose(sigmas, np.ones(4) * 1e-4, rtol=0.01)
rho = RV_INJ[0].RV_set.Rho()
rho_target = np.zeros((4, 4))
np.fill_diagonal(rho_target, 1.)
assert_allclose(rho, rho_target, rtol=0.01)
assert np.all([rv.distribution == 'normal' for rv in RV_INJ])
assert_allclose(RV_INJ[0].truncation_limits, [0., 10./3.], rtol=0.01)
assert_allclose(RV_INJ[1].truncation_limits, [0., 10./3.], rtol=0.01)
assert_allclose(RV_INJ[2].truncation_limits, [0., 10.], rtol=0.01)
assert_allclose(RV_INJ[3].truncation_limits, [0., 10.], rtol=0.01)
# REP
RV_REP = list(A._DV_REP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_REP]).T
assert_allclose(thetas, np.ones(6), rtol=0.01)
assert_allclose(betas, np.ones(6) * 1e-4, rtol=0.01)
rho = RV_REP[0].RV_set.Rho()
rho_target = np.zeros((6, 6))
np.fill_diagonal(rho_target, 1.)
assert_allclose(rho, rho_target, rtol=0.01)
assert np.all([rv.distribution == 'lognormal' for rv in RV_REP])
# ------------------------------------------------------------------------
A.define_loss_model()
# QNT (deterministic)
QNT = A._FG_dict['T0001.001']._performance_groups[0]._quantity
assert QNT == pytest.approx(50., rel=0.01)
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# TIME
T_check = A._TIME.describe().T.loc[['hour','month','weekday?'],:]
assert_allclose(T_check['mean'], np.array([11.5, 5.5, 5. / 7.]), rtol=0.05)
assert_allclose(T_check['min'], np.array([0., 0., 0.]), rtol=0.01)
assert_allclose(T_check['max'], np.array([23., 11., 1.]), rtol=0.01)
assert_allclose(T_check['50%'], np.array([12., 5., 1.]), atol=1.0)
assert_allclose(T_check['count'], np.array([10000., 10000., 10000.]),
rtol=0.01)
# POP
P_CDF = A._POP.describe(np.arange(1, 27) / 27.).iloc[:, 0].values[4:]
vals, counts = np.unique(P_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]), rtol=0.01)
assert_allclose(counts, np.array([14, 2, 7, 5]), atol=1)
# COL
COL_check = A._COL.describe().T
assert COL_check['mean'].values[0] == pytest.approx(0.5, rel=0.05)
assert len(A._ID_dict['non-collapse']) == pytest.approx(5000, rel=0.05)
assert len(A._ID_dict['collapse']) == pytest.approx(5000, rel=0.05)
# DMG
DMG_check = A._DMG.describe().T
assert_allclose(DMG_check['mean'], np.array([17.074, 17.074, 7.9361]),
rtol=0.1, atol=1.0)
assert_allclose(DMG_check['min'], np.zeros(3), rtol=0.01)
assert_allclose(DMG_check['max'], np.ones(3) * 50.0157, rtol=0.05)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# RED
DV_RED = A._DV_dict['red_tag'].describe().T
assert_allclose(DV_RED['mean'], np.array([0.341344, 0.1586555]), rtol=0.1)
# INJ - collapse
DV_INJ_C = deepcopy(A._COL[['INJ-0', 'INJ-1']])
DV_INJ_C.dropna(inplace=True)
NC_count = DV_INJ_C.describe().T['count'][0]
assert_allclose(NC_count, np.ones(2) * 5000, rtol=0.05)
# lvl 1
vals, counts = np.unique(DV_INJ_C.iloc[:, 0].values, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]) * 0.1, rtol=0.01)
assert_allclose(counts / NC_count, np.array([14, 2, 7, 5]) / 28., atol=0.01, rtol=0.1)
# lvl 2
vals, counts = np.unique(DV_INJ_C.iloc[:, 1].values, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]) * 0.9, rtol=0.01)
assert_allclose(counts / NC_count, np.array([14, 2, 7, 5]) / 28., atol=0.01, rtol=0.1)
# INJ - non-collapse
DV_INJ_NC = deepcopy(A._DV_dict['injuries'])
DV_INJ_NC[0].dropna(inplace=True)
assert_allclose(DV_INJ_NC[0].describe().T['count'], np.ones(2) * 5000,
rtol=0.05)
# lvl 1 DS2
I_CDF = DV_INJ_NC[0].iloc[:, 0]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.075, 0.15, 0.3]), rtol=0.01)
target_prob = np.array(
[0.6586555, 0., 0., 0.] + 0.3413445 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl 1 DS3
I_CDF = DV_INJ_NC[0].iloc[:, 1]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.075, 0.15, 0.3]), rtol=0.01)
target_prob = np.array(
[0.8413445, 0., 0., 0.] + 0.1586555 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl 2 DS2
I_CDF = DV_INJ_NC[1].iloc[:, 0]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.025, 0.05, 0.1]), rtol=0.01)
target_prob = np.array(
[0.6586555, 0., 0., 0.] + 0.3413445 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl2 DS3
I_CDF = DV_INJ_NC[1].iloc[:, 1]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.025, 0.05, 0.1]), rtol=0.01)
target_prob = np.array(
[0.8413445, 0., 0., 0.] + 0.1586555 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# REP
assert len(A._ID_dict['non-collapse']) == len(A._ID_dict['repairable'])
assert len(A._ID_dict['irreparable']) == 0
# cost
DV_COST = A._DV_dict['rec_cost']
# DS1
C_CDF = DV_COST.iloc[:, 0]
C_CDF = np.around(C_CDF / 10., decimals=0) * 10.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 2500], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS2
C_CDF = DV_COST.iloc[:, 1]
C_CDF = np.around(C_CDF / 100., decimals=0) * 100.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 25000], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS3
C_CDF = DV_COST.iloc[:, 2]
C_CDF = np.around(C_CDF / 1000., decimals=0) * 1000.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 250000], rtol=0.01)
t_prob = 0.1586555
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# time
DV_TIME = A._DV_dict['rec_time']
# DS1
T_CDF = DV_TIME.iloc[:, 0]
T_CDF = np.around(T_CDF, decimals=1)
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 2.5], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS2
T_CDF = DV_TIME.iloc[:, 1]
T_CDF = np.around(T_CDF, decimals=0)
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 25], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS3
T_CDF = DV_TIME.iloc[:, 2]
T_CDF = np.around(T_CDF / 10., decimals=0) * 10.
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 250], rtol=0.01)
t_prob = 0.1586555
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
S = A._SUMMARY
SD = S.describe().T
assert_allclose(S[('event time', 'month')], A._TIME['month'] + 1)
assert_allclose(S[('event time', 'weekday?')], A._TIME['weekday?'])
assert_allclose(S[('event time', 'hour')], A._TIME['hour'])
assert_allclose(S[('inhabitants', '')], A._POP.iloc[:, 0])
assert SD.loc[('collapses', 'collapsed'), 'mean'] == pytest.approx(0.5,
rel=0.05)
assert SD.loc[('collapses', 'mode'), 'mean'] == 0.
assert SD.loc[('collapses', 'mode'), 'count'] == pytest.approx(5000,
rel=0.05)
assert SD.loc[('red tagged', ''), 'mean'] == pytest.approx(0.5, rel=0.05)
assert SD.loc[('red tagged', ''), 'count'] == pytest.approx(5000, rel=0.05)
for col in ['irreparable', 'cost impractical', 'time impractical']:
assert SD.loc[('reconstruction', col), 'mean'] == 0.
assert SD.loc[('reconstruction', col), 'count'] == pytest.approx(5000,
rel=0.05)
RC = deepcopy(S.loc[:, ('reconstruction', 'cost')])
RC_CDF = np.around(RC / 1000., decimals=0) * 1000.
vals, counts = np.unique(RC_CDF, return_counts=True)
assert_allclose(vals, np.array([0, 2., 3., 25., 250., 300.]) * 1000.)
t_prob1 = 0.3413445 / 2.
t_prob2 = 0.1586555 / 2.
assert_allclose(counts / 10000.,
[t_prob2, t_prob1 / 2., t_prob1 / 2., t_prob1, t_prob2,
0.5], atol=0.01, rtol=0.1)
RT = deepcopy(S.loc[:, ('reconstruction', 'time-parallel')])
RT_CDF = np.around(RT, decimals=0)
vals, counts = np.unique(RT_CDF, return_counts=True)
assert_allclose(vals, np.array([0, 2., 3., 25., 250., 300.]))
t_prob1 = 0.3413445 / 2.
t_prob2 = 0.1586555 / 2.
assert_allclose(counts / 10000.,
[t_prob2, t_prob1 / 2., t_prob1 / 2., t_prob1, t_prob2,
0.5], atol=0.01, rtol=0.1)
assert_allclose(S.loc[:, ('reconstruction', 'time-parallel')],
S.loc[:, ('reconstruction', 'time-sequential')])
CAS = deepcopy(S.loc[:, ('injuries', 'sev1')])
CAS_CDF = np.around(CAS, decimals=3)
vals, counts = np.unique(CAS_CDF, return_counts=True)
assert_allclose(vals, [0, 0.075, 0.15, 0.25, 0.3, 0.5, 1.])
assert_allclose(counts / 10000.,
np.array([35, 1, 3.5, 2, 2.5, 7, 5]) / 56., atol=0.01,
rtol=0.1)
CAS = deepcopy(S.loc[:, ('injuries', 'sev2')])
CAS_CDF = np.around(CAS, decimals=3)
vals, counts = np.unique(CAS_CDF, return_counts=True)
assert_allclose(vals, [0, 0.025, 0.05, 0.1, 2.25, 4.5, 9.])
assert_allclose(counts / 10000.,
np.array([35, 1, 3.5, 2.5, 2, 7, 5]) / 56., atol=0.01,
rtol=0.1)
def test_FEMA_P58_Assessment_EDP_uncertainty_basic():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_2.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_2.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
assert_allclose(thetas, [9.80665, 12.59198, 0.074081, 0.044932], rtol=0.02)
assert_allclose(betas, [0.25, 0.25, 0.3, 0.4], rtol=0.02)
rho = RV_EDP[0].RV_set.Rho()
rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
assert_allclose(rho, rho_target, atol=0.05)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer(
[0.3, 0.4], [0.3, 0.4]),
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == pytest.approx(col_target, rel=0.1)
# DMG
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000. for i in
range(8)]
DMG_1_PID = mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.1]))[
0]
DMG_2_PID = mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 0.05488]), upper=np.log([0.1, 0.1]))[
0]
DMG_1_PFA = mvn_od(np.log([0.074081, 9.80665]),
np.array([[1, 0.3], [0.3, 1]]) * np.outer([0.3, 0.25],
[0.3, 0.25]),
lower=np.log([1e-6, 9.80665]),
upper=np.log([0.1, np.inf]))[0]
DMG_2_PFA = mvn_od(np.log([0.074081, 12.59198]),
np.array([[1, 0.3], [0.3, 1]]) * np.outer([0.3, 0.25],
[0.3, 0.25]),
lower=np.log([1e-6, 9.80665]),
upper=np.log([0.1, np.inf]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == pytest.approx(DMG_1_PID, rel=0.10)
assert DMG_check[2] == pytest.approx(DMG_2_PID, rel=0.10)
assert DMG_check[4] == pytest.approx(DMG_1_PFA, rel=0.10)
assert DMG_check[6] == pytest.approx(DMG_2_PFA, rel=0.10)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
# PG 1011 and 1012
P_target = [
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1021 and 1022
P_target = [
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2011 and 2012
P_target = [
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 9.80665, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 9.80665]),
upper=np.log([0.1, np.inf, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 1e-6]),
upper=np.log([0.1, np.inf, 9.80665]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2021 and 2022
P_target = [
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, np.inf, 9.80665]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 9.80665]),
upper=np.log([0.1, np.inf, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 9.80665]),
upper=np.log([0.1, 9.80665, np.inf]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == pytest.approx(DMG_1_PID, rel=0.10)
assert RED_check[2] == pytest.approx(DMG_2_PID, rel=0.10)
assert RED_check[4] == pytest.approx(DMG_1_PFA, rel=0.10)
assert RED_check[6] == pytest.approx(DMG_2_PFA, rel=0.10)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log([0.074081, 0.044932, 9.80665, 12.59198]),
np.array(
[[1.0, 0.7, 0.3, 0.3], [0.7, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.6],
[0.3, 0.3, 0.6, 1.0]]) * np.outer(
[0.3, 0.4, 0.25, 0.25],
[0.3, 0.4, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[0.05488, 0.05488, 9.80665, 9.80665]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = (1.0 - SD.loc[('red tagged', ''), 'mean']) * SD.loc[
('red tagged', ''), 'count'] / 10000.
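    # the original test is truncated here; it presumably closes by comparing the
    # simulated and analytical no-red-tag probabilities (the tolerance below is
    # an assumption)
    assert P_no_RED_target == pytest.approx(P_no_RED_test, abs=0.03)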
def test_FEMA_P58_Assessment_EDP_uncertainty_detection_limit():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
This test differs from the basic case in having unreliable EDP values above
a certain limit - a typical feature of interstory drifts in dynamic
simulations. Such cases should not be a problem if the limits can be
    estimated and they are specified as detection limits in the input file.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_3.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_3.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 12.59198, 0.074081, 0.044932]
EDP_beta_target = [0.25, 0.25, 0.3, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.025)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
EDP_COV_test = EDP_rho_test * np.outer(EDP_beta_test, EDP_beta_test)
assert_allclose(EDP_rho_test, EDP_rho_target, atol=0.15)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(np.log(EDP_theta_test[2:]),
EDP_COV_test[2:, 2:],
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == prob_approx(col_target, 0.03)
# DMG
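    # fraction of the 10,000 realizations with non-zero damage in each of the 8
    # performance groups, compared below to the corresponding orthant
    # probabilities of the fitted multivariate lognormal EDP distribution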
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000.
for i in range(8)]
DMG_1_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]),
upper=np.log([0.1, 0.1]))[0]
DMG_2_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]),
upper=np.log([0.1, 0.1]))[0]
DMG_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
DMG_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == prob_approx(DMG_1_PID, 0.03)
assert DMG_check[2] == prob_approx(DMG_2_PID, 0.03)
assert DMG_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert DMG_check[6] == prob_approx(DMG_2_PFA, 0.03)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
# PG 1011 and 1012
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 1021 and 1022
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2011 and 2012
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2021 and 2022
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == prob_approx(DMG_1_PID, 0.03)
assert RED_check[2] == prob_approx(DMG_2_PID, 0.03)
assert RED_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert RED_check[6] == prob_approx(DMG_2_PFA, 0.03)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, 9.80665, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = ((1.0 - SD.loc[('red tagged', ''), 'mean'])
* SD.loc[('red tagged', ''), 'count'] / 10000.)
assert P_no_RED_target == prob_approx(P_no_RED_test, 0.04)
def test_FEMA_P58_Assessment_EDP_uncertainty_failed_analyses():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
Here we use EDP results with unique values assigned to failed analyses.
In particular, PID=1.0 and PFA=100.0 are used when an analysis fails.
These values shall be handled by detection limits of 10 and 100 for PID
and PFA, respectively.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_4.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_4.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 12.59198, 0.074081, 0.044932]
EDP_beta_target = [0.25, 0.25, 0.3, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.025)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
EDP_COV_test = EDP_rho_test * np.outer(EDP_beta_test, EDP_beta_test)
assert_allclose(EDP_rho_test, EDP_rho_target, atol=0.15)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(np.log(EDP_theta_test[2:]),
EDP_COV_test[2:,2:],
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == prob_approx(col_target, 0.03)
# DMG
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000.
for i in range(8)]
DMG_1_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:,2:],
lower=np.log([0.05488, 1e-6]),
upper=np.log([0.1, 0.1]))[0]
DMG_2_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]),
upper=np.log([0.1, 0.1]))[0]
DMG_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
DMG_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == prob_approx(DMG_1_PID, 0.03)
assert DMG_check[2] == prob_approx(DMG_2_PID, 0.03)
assert DMG_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert DMG_check[6] == prob_approx(DMG_2_PFA, 0.03)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
# PG 1011 and 1012
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 1021 and 1022
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2011 and 2012
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2021 and 2022
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == prob_approx(DMG_1_PID, 0.03)
assert RED_check[2] == prob_approx(DMG_2_PID, 0.03)
assert RED_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert RED_check[6] == prob_approx(DMG_2_PFA, 0.03)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, 9.80665, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = ((1.0 - SD.loc[('red tagged', ''), 'mean'])
* SD.loc[('red tagged', ''), 'count'] / 10000.)
assert P_no_RED_target == prob_approx(P_no_RED_test, 0.04)
def test_FEMA_P58_Assessment_EDP_uncertainty_3D():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
In this test we look at the propagation of EDP values provided for two
different directions. (3D refers to the numerical model used for response
estimation.)
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_5.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_5.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 8.65433, 12.59198, 11.11239,
0.074081, 0.063763, 0.044932, 0.036788]
EDP_beta_target = [0.25, 0.25, 0.25, 0.25, 0.3, 0.3, 0.4, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.05)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = np.array([
[1.0, 0.8, 0.6, 0.5, 0.3, 0.3, 0.3, 0.3],
[0.8, 1.0, 0.5, 0.6, 0.3, 0.3, 0.3, 0.3],
[0.6, 0.5, 1.0, 0.8, 0.3, 0.3, 0.3, 0.3],
[0.5, 0.6, 0.8, 1.0, 0.3, 0.3, 0.3, 0.3],
[0.3, 0.3, 0.3, 0.3, 1.0, 0.8, 0.7, 0.6],
[0.3, 0.3, 0.3, 0.3, 0.8, 1.0, 0.6, 0.7],
[0.3, 0.3, 0.3, 0.3, 0.7, 0.6, 1.0, 0.8],
[0.3, 0.3, 0.3, 0.3, 0.6, 0.7, 0.8, 1.0]])
large_rho_ids = np.where(EDP_rho_target >= 0.5)
small_rho_ids = np.where(EDP_rho_target < 0.5)
assert_allclose(EDP_rho_test[large_rho_ids], EDP_rho_target[large_rho_ids],
atol=0.1)
assert_allclose(EDP_rho_test[small_rho_ids], EDP_rho_target[small_rho_ids],
atol=0.2)
EDP_COV_test = EDP_rho_test * np.outer(EDP_beta_test, EDP_beta_test)
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
theta_PID = np.log(EDP_theta_target[4:])
COV_PID = EDP_COV_test[4:, 4:]
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(theta_PID, COV_PID,
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == pytest.approx(col_target, rel=0.1, abs=0.05)
# DMG
realization_count = float(A._AIM_in['general']['realizations'])
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / realization_count for i in
range(8)]
DMG_1_1_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_1_2_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_1_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_2_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_1_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_1_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 9.80665, 1e-6,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 9.80665,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_ref = [DMG_1_1_PID, DMG_1_2_PID, DMG_2_1_PID, DMG_2_2_PID,
DMG_1_1_PFA, DMG_1_2_PFA, DMG_2_1_PFA, DMG_2_2_PFA]
assert_allclose(DMG_check, DMG_ref, rtol=0.10, atol=0.01)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 249., 624., 1251., 1875.]
T_target = [0., 0.249, 0.624, 1.251, 1.875]
# PG 1011
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.1, 0.05488, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.05488, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.1, 0.05488, 0.1, 0.05488]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([0.05488, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.05488, 0.05488, 0.05488]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 0].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 0].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1012
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.05488, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.05488, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.05488, 0.1, 0.1, 0.05488]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.05488, 0.1, 0.05488, 0.05488]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 1].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 1].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1021
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.1, 0.05488, 0.1, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.05488, 0.1, 0.1, 0.05488]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.05488, 0.05488, 0.1, 0.05488]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 2].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 2].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1022
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.1, 0.05488, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.05488, 0.1, 0.05488, 0.1]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.05488, 0.05488, 0.05488, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 3].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 5)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 3].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 5)]
P_test = P_test[np.where(P_test > 5)]
P_test = P_test / realization_count
assert_allclose(P_target[:-1], P_test[:4], atol=0.05)
assert_allclose(C_target[:-1], C_test[:4], rtol=0.001)
assert_allclose(T_target[:-1], T_test[:4], rtol=0.001)
# PG 2011
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 4].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 4].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2012
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 5].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 5].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target[:4], P_test[:4], atol=0.05)
assert_allclose(C_target[:4], C_test[:4], rtol=0.001)
assert_allclose(T_target[:4], T_test[:4], rtol=0.001)
# PG 2021
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 6].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 6].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2022
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 7].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 7].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / realization_count).values
assert_allclose(RED_check, DMG_ref, atol=0.02, rtol=0.10)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
upper=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 0.05488,
0.05488, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = (1.0 - SD.loc[('red tagged', ''), 'mean']) * SD.loc[
('red tagged', ''), 'count'] / realization_count
assert P_no_RED_target == pytest.approx(P_no_RED_test, abs=0.03)
def test_FEMA_P58_Assessment_EDP_uncertainty_single_sample():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
In this test we provide only one structural response result and see if it
is properly handled as a deterministic value or a random EDP using the
additional sources of uncertainty.
"""
print()
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_6.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_6.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
    assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
import tensorflow as tf
import os
import sys
sys.path.append('..')
import tools as tools
import numpy as np
import matplotlib.pyplot as plt
GPU='0'
def load_real_rgbs(test_mv=5):
obj_rgbs_folder ='./Data_sample/amazon_real_rgbs/airfilter/'
rgbs = []
rgbs_views = sorted(os.listdir(obj_rgbs_folder))
for v in rgbs_views:
if not v.endswith('png'): continue
rgbs.append(tools.Data.load_single_X_rgb_r2n2(obj_rgbs_folder + v, train=False))
rgbs = np.asarray(rgbs)
x_sample = rgbs[0:test_mv, :, :, :].reshape(1, test_mv, 127, 127, 3)
return x_sample, None
def load_shapenet_rgbs(test_mv=8):
obj_rgbs_folder = './Data_sample/ShapeNetRendering/03001627/1a6f615e8b1b5ae4dbbc9440457e303e/rendering/'
obj_gt_vox_path ='./Data_sample/ShapeNetVox32/03001627/1a6f615e8b1b5ae4dbbc9440457e303e/model.binvox'
rgbs=[]
rgbs_views = sorted(os.listdir(obj_rgbs_folder))
for v in rgbs_views:
if not v.endswith('png'): continue
rgbs.append(tools.Data.load_single_X_rgb_r2n2(obj_rgbs_folder + v, train=False))
    rgbs = np.asarray(rgbs)
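    # The remainder of this function is not part of this excerpt. By analogy with
    # load_real_rgbs above, it presumably reshapes the views into a single batch
    # and returns it together with the ground-truth voxel path (an assumption).
    x_sample = rgbs[0:test_mv, :, :, :].reshape(1, test_mv, 127, 127, 3)
    return x_sample, obj_gt_vox_path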
import matplotlib.pyplot as plt
import gym
import time
from csv import writer
from stable_baselines3.common.type_aliases import GymObs, GymStepReturn
from typing import Union
import numpy as np
import os
from typing import Optional
from autoencoder import load_ae
import gym.wrappers
class AutoEncoderWrapper(gym.Wrapper):
def __init__(self, env, ae_path):
super().__init__(env)
self.env = env
assert ae_path is not None
self.ae = load_ae(ae_path)
self.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.ae.z_size,), dtype=np.float32)
def reset(self):
return self.ae.encode_from_raw_image(self.env.reset()[:, :, ::-1]).flatten()
def step(self, action):
obs, reward, done, infos = self.env.step(action)
return self.ae.encode_from_raw_image(obs[:, :, ::-1]).flatten(), reward, done, infos
class AutoEncoderHistoryWrapper(gym.Wrapper):
    def __init__(self, env: gym.Env, ae_path, num_history=10, max_throttle=1.0, min_throttle=0.0, left_steering=-1.0, right_steering=1.0) -> None:
super().__init__(env)
self.env = env
self.ae = load_ae(ae_path)
self.max_throttle = max_throttle
        self.min_throttle = min_throttle
        self.left_steering = left_steering
self.right_steering = right_steering
self.num_command = 2
self.steering_diff = 0.15-1e-5
self.num_history = num_history
self.history = np.zeros((1, self.num_command*self.num_history), dtype=np.float32)
self.action_space = gym.spaces.Box(
            low=np.array([np.float32(self.left_steering), np.float32(self.min_throttle)]),
high=np.array([np.float32(self.right_steering), np.float32(self.max_throttle)]),
dtype=np.float32
)
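        # observation = [AE latent encoding of the camera frame, followed by the
        # last `num_history` (steering, throttle) command pairs]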
self.observation_space = gym.spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(1,self.ae.z_size + self.num_command*self.num_history),
dtype=np.float32
)
def reset(self, **kwargs) -> GymObs:
obs = self.ae.encode_from_raw_image(self.env.reset()[:, :, ::-1]).flatten()
obs = np.reshape(obs, (1, self.ae.z_size))
self.history = np.zeros((1, self.num_command*self.num_history), dtype=np.float32)
observation = np.concatenate((obs, self.history), axis=-1)
return observation
def step(self, action: Union[np.ndarray, int]) -> GymStepReturn:
# last_steering = self.history[0, -2]
# diff = np.clip(action[0] - last_steering, -self.steering_diff, self.steering_diff)
# #print(f"pred {action[0]} - last {last_steering} - now {last_steering + diff}")
# action[0] = last_steering + diff
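        # shift the command history left by one (steering, throttle) pair and
        # store the newest action in the last two slots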
self.history = np.roll(self.history, shift=-self.num_command, axis=-1)
self.history[..., -self.num_command:] = action
obs, reward, done, info = self.env.step(action)
obs = self.ae.encode_from_raw_image(obs[:, :, ::-1]).flatten()
        obs = np.reshape(obs, (1, self.ae.z_size))
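        # The original step() is truncated here; by analogy with reset() above it
        # presumably appends the command history to the encoded frame and returns.
        observation = np.concatenate((obs, self.history), axis=-1)
        return observation, reward, done, info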
from __future__ import division
from collections import defaultdict
import numpy as np
from time import time
import random
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# import tensorflow as tf
class DataModule():
def __init__(self, conf, filename):
self.conf = conf
self.data_dict = {}
self.terminal_flag = 1
self.filename = filename
self.index = 0
####### Initalize Procedures #######
def prepareModelSupplement(self, model):
data_dict = {}
if 'CONSUMED_ITEMS_SPARSE_MATRIX' in model.supply_set:
self.generateConsumedItemsSparseMatrix()
#self.arrangePositiveData()
data_dict['CONSUMED_ITEMS_INDICES_INPUT'] = self.consumed_items_indices_list
data_dict['CONSUMED_ITEMS_VALUES_INPUT'] = self.consumed_items_values_list
data_dict['CONSUMED_ITEMS_VALUES_WEIGHT_AVG_INPUT'] = self.consumed_items_values_weight_avg_list
data_dict['CONSUMED_ITEMS_NUM_INPUT'] = self.consumed_item_num_list
data_dict['CONSUMED_ITEMS_NUM_DICT_INPUT'] = self.user_item_num_dict
data_dict['USER_ITEM_SPARSITY_DICT'] = self.user_item_sparsity_dict
if 'SOCIAL_NEIGHBORS_SPARSE_MATRIX' in model.supply_set:
self.readSocialNeighbors()
self.generateSocialNeighborsSparseMatrix()
data_dict['SOCIAL_NEIGHBORS_INDICES_INPUT'] = self.social_neighbors_indices_list
data_dict['SOCIAL_NEIGHBORS_VALUES_INPUT'] = self.social_neighbors_values_list
data_dict['SOCIAL_NEIGHBORS_VALUES_WEIGHT_AVG_INPUT'] = self.social_neighbors_values_weight_avg_list
data_dict['SOCIAL_NEIGHBORS_NUM_INPUT'] = self.social_neighbor_num_list
data_dict['SOCIAL_NEIGHBORS_NUM_DICT_INPUT'] = self.social_neighbors_num_dict
data_dict['USER_USER_SPARSITY_DICT']= self.user_user_sparsity_dict
if 'ITEM_CUSTOMER_SPARSE_MATRIX' in model.supply_set:
self.generateConsumedItemsSparseMatrixForItemUser()
data_dict['ITEM_CUSTOMER_INDICES_INPUT'] = self.item_customer_indices_list
data_dict['ITEM_CUSTOMER_VALUES_INPUT'] = self.item_customer_values_list
data_dict['ITEM_CUSTOMER_VALUES_WEIGHT_AVG_INPUT'] = self.item_customer_values_weight_avg_list
data_dict['ITEM_CUSTOMER_NUM_INPUT'] = self.item_customer_num_list
data_dict['ITEM_USER_NUM_DICT_INPUT'] = self.item_user_num_dict
return data_dict
def initializeRankingTrain(self):
self.readData()
self.arrangePositiveData()
self.arrangePositiveDataForItemUser()
self.generateTrainNegative()
def initializeRankingVT(self):
self.readData()
self.arrangePositiveData()
self.arrangePositiveDataForItemUser()
self.generateTrainNegative()
def initalizeRankingEva(self):
self.readData()
self.getEvaPositiveBatch()
self.generateEvaNegative()
def linkedMap(self):
self.data_dict['USER_LIST'] = self.user_list
self.data_dict['ITEM_LIST'] = self.item_list
self.data_dict['LABEL_LIST'] = self.labels_list
def linkedRankingEvaMap(self):
self.data_dict['EVA_USER_LIST'] = self.eva_user_list
self.data_dict['EVA_ITEM_LIST'] = self.eva_item_list
####### Data Loading #######
def readData(self):
f = open(self.filename)
total_user_list = set()
hash_data = defaultdict(int)
for _, line in enumerate(f):
arr = line.split("\t")
hash_data[(int(arr[0]), int(arr[1]))] = 1
total_user_list.add(int(arr[0]))
self.total_user_list = list(total_user_list)
self.hash_data = hash_data
def arrangePositiveData(self):
positive_data = defaultdict(set)
user_item_num_dict = defaultdict(set)
total_data = set()
hash_data = self.hash_data
for (u, i) in hash_data:
total_data.add((u, i))
positive_data[u].add(i)
user_list = sorted(list(positive_data.keys()))
        # NOTE: `user_item_num_for_sparsity_dict` is stored below but never defined
        # in this excerpt; it is assumed here to hold the raw per-user interaction
        # counts used by the sparsity analysis.
        user_item_num_for_sparsity_dict = defaultdict(set)
        for u in range(self.conf.num_users):
            user_item_num_dict[u] = len(positive_data[u])+1
            user_item_num_for_sparsity_dict[u] = len(positive_data[u])
self.positive_data = positive_data
self.user_item_num_dict = user_item_num_dict
self.user_item_num_for_sparsity_dict = user_item_num_for_sparsity_dict
self.total_data = len(total_data)
def Sparsity_analysis_for_user_item_network(self):
hash_data_for_user_item = self.hash_data
        sparsity_user_item_dict = {}
def arrangePositiveDataForItemUser(self):
positive_data_for_item_user = defaultdict(set)
item_user_num_dict = defaultdict(set)
total_data_for_item_user = set()
hash_data_for_item_user = self.hash_data
for (u, i) in hash_data_for_item_user:
total_data_for_item_user.add((i, u))
positive_data_for_item_user[i].add(u)
item_list = sorted(list(positive_data_for_item_user.keys()))
for i in range(self.conf.num_items):
item_user_num_dict[i] = len(positive_data_for_item_user[i])+1
self.item_user_num_dict = item_user_num_dict
self.positive_data_for_item_user = positive_data_for_item_user
self.total_data_for_item_user = len(total_data_for_item_user)
# ----------------------
    # This function is designed for generating train/val/test negative samples
def generateTrainNegative(self):
num_items = self.conf.num_items
num_negatives = self.conf.num_negatives
negative_data = defaultdict(set)
total_data = set()
hash_data = self.hash_data
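        # for every observed (user, item) pair, draw `num_negatives` items the user
        # has not interacted with, using rejection sampling against hash_data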
for (u, i) in hash_data:
total_data.add((u, i))
for _ in range(num_negatives):
j = np.random.randint(num_items)
while (u, j) in hash_data:
j = np.random.randint(num_items)
negative_data[u].add(j)
total_data.add((u, j))
self.negative_data = negative_data
self.terminal_flag = 1
# ----------------------
    # This function is designed for the val/test set, to compute the loss
def getVTRankingOneBatch(self):
positive_data = self.positive_data
negative_data = self.negative_data
total_user_list = self.total_user_list
user_list = []
item_list = []
labels_list = []
for u in total_user_list:
user_list.extend([u] * len(positive_data[u]))
item_list.extend(positive_data[u])
labels_list.extend([1] * len(positive_data[u]))
user_list.extend([u] * len(negative_data[u]))
item_list.extend(negative_data[u])
labels_list.extend([0] * len(negative_data[u]))
self.user_list = np.reshape(user_list, [-1, 1])
self.item_list = np.reshape(item_list, [-1, 1])
self.labels_list = np.reshape(labels_list, [-1, 1])
# ----------------------
    # This function is designed for the training process
def getTrainRankingBatch(self):
positive_data = self.positive_data
negative_data = self.negative_data
total_user_list = self.total_user_list
index = self.index
batch_size = self.conf.training_batch_size
user_list, item_list, labels_list = [], [], []
if index + batch_size < len(total_user_list):
target_user_list = total_user_list[index:index+batch_size]
self.index = index + batch_size
else:
target_user_list = total_user_list[index:len(total_user_list)]
self.index = 0
self.terminal_flag = 0
for u in target_user_list:
user_list.extend([u] * len(positive_data[u]))
item_list.extend(list(positive_data[u]))
labels_list.extend([1] * len(positive_data[u]))
user_list.extend([u] * len(negative_data[u]))
item_list.extend(list(negative_data[u]))
labels_list.extend([0] * len(negative_data[u]))
self.user_list = np.reshape(user_list, [-1, 1])
self.item_list = np.reshape(item_list, [-1, 1])
self.labels_list = np.reshape(labels_list, [-1, 1])
# ----------------------
# This function is designed for the positive data
def getEvaPositiveBatch(self):
hash_data = self.hash_data
user_list = []
item_list = []
index_dict = defaultdict(list)
index = 0
for (u, i) in hash_data:
user_list.append(u)
item_list.append(i)
index_dict[u].append(index)
index = index + 1
self.eva_user_list = np.reshape(user_list, [-1, 1])
self.eva_item_list = np.reshape(item_list, [-1, 1])
self.eva_index_dict = index_dict
# ----------------------
    # This function is designed for generating negative evaluation data
def generateEvaNegative(self):
hash_data = self.hash_data
total_user_list = self.total_user_list
num_evaluate = self.conf.num_evaluate
num_items = self.conf.num_items
eva_negative_data = defaultdict(list)
for u in total_user_list:
for _ in range(num_evaluate):
j = np.random.randint(num_items)
while (u, j) in hash_data:
j = np.random.randint(num_items)
eva_negative_data[u].append(j)
self.eva_negative_data = eva_negative_data
# ----------------------
    # This function is designed for generating negative batches in ranking evaluation
def getEvaRankingBatch(self):
batch_size = self.conf.evaluate_batch_size
num_evaluate = self.conf.num_evaluate
eva_negative_data = self.eva_negative_data
total_user_list = self.total_user_list
index = self.index
terminal_flag = 1
total_users = len(total_user_list)
user_list = []
item_list = []
if index + batch_size < total_users:
batch_user_list = total_user_list[index:index+batch_size]
self.index = index + batch_size
else:
terminal_flag = 0
batch_user_list = total_user_list[index:total_users]
self.index = 0
for u in batch_user_list:
user_list.extend([u]*num_evaluate)
item_list.extend(eva_negative_data[u])
        self.eva_user_list = np.reshape(user_list, [-1, 1])
import numpy as np
def hybrid_index(pcp, evapo, tau, lagmax, alpha, beta):
"""
c-----------------------------------------------------------------------
c
c Input variables and parameters:
c
c pcp: vector array of input precipitation time series.
c evapo: vector array of input non-precipitation effects on drought
c index (e.g., evapotranspiration).
c This time series is not used if parameter alpha is zero,
c thus giving a precipitation-only hybrid index.
c tau: e-folding time of exponentially weighted averages in the
c time units of the input time series pcp and evapo (e.g.,
c in months for the application in Chelton and Risien, 2020).
c lagmax: Maximum lag over which exponentially weighted averages are
c computed.
c Recommended value is 3 times the e-folding time tau.
c alpha: parameter specifying ratio of standard deviation of
c non-precipitation effects (evapo) to standard deviation of
c precipitation effects (pcp) in the simulation of the
c statistics of non-precipitation effects of evapo on the
c hybrid drought index from a random time series. The purpose
c of such simulations is to parameterize a PDSI or MCDI time
    c            series as in Appendix B of Chelton and Risien (2020).
c This parameter is set to zero for a precipitation-only
c hybrid drought index.
c If the vector evapo contains actual non-precipitation
c effects calculated externally, rather than a random time
c series as the simulations in Chelton and Risien (2020),
c set alpha to -1.
c beta: parameter specifying ratio of future to past values of
c precipitation (pcp) and non-precipitation (evapo) effects
c in 2-sided exponentially weighted averages.
c Set this parameter to zero for a 1-sided exponentially
c weighted hybrid index.
c For 2-sided exponentially weighted averages, note that this
c code assumes that the exponentially decaying weights are
c the same for both negative and positive lags.
c
c Output variable:
c
c pcpexp: the hybrid drought index.
c
c Reference:
    c    Chelton, D. B., and C. M. Risien, 2020: A hybrid precipitation
c index inspired by the SPI, PDSI and MCDI. Part 1: Development of
c the index. Journal of Hydrometeorology, DOI: 10.1175/JHM-D-19-0230.1.
c
c-----------------------------------------------------------------------
"""
ndat = len(pcp)
# Special case of no smoothing (tau=0):
if tau == 0:
pcpexp = pcp
return pcpexp
# Generate time series with evapotranspiration effects added.
# If parameter alpha=0, only load pcp data into array pcpevapo.
if alpha == 0:
# form input time series for precipitation-only hybrid index.
pcpevapo = pcp
elif alpha == -1:
# Add actual evapotranspiration effects in the vector evapo to
# the precipitation effects in the vector pcp.
pcpevapo = pcp + evapo
else:
"""
c Scale random time series evapo (intended to represent the
c statistical effects of evapotranspiration on drought
c variability) so that the evapo-to-pcp standard deviation
c ratio is alpha. Then form simulated drought index for
c calculations such as those in Appendix B of Chelton and
c Risien (2020) for the purposes of parameterizing a PDSI or
c MCDI time series.
"""
# np.var uses ddof=0 by default, i.e. the variance is normalized by the
# number of observations (the equivalent of passing 1 as the second
# calling argument in the original implementation), not by one less than that.
varpcp = np.var(pcp)
sdevpcp = np.sqrt(varpcp)
varevapo = np.var(evapo)
sdevevapo = np.sqrt(varevapo)
sdevratio = sdevpcp / sdevevapo
factor = sdevratio * alpha
pcpevapo = np.squeeze(pcp + factor*np.transpose(evapo))
# Generate weights for exponentially weighted averages.
# For 2-sided exponentially weighted averages with beta .gt. 0,
# assume exponential weighting is symmetric about zero lag.
efold = 1 / tau
ii = np.arange(lagmax+1)
weight = np.exp(-efold * ii)
# normalize weights to sum to 1
weight = weight / np.sum(weight)
# generate the exponentially weighted average time series
pcpexp = []  # output list, filled below (one value per time step)
#.. all conditional branches from the original fortran code are
#.. retained for clarity.
for ii in range(0,ndat):
if ii <= lagmax-1:
pcpexp.append(np.nan)
elif beta > 0 and ii > ndat-lagmax-1:
pcpexp.append(np.nan)
else:
pcpexpi = 0.0
for jj in range(0,lagmax+1):
imj = ii - jj
ipj = ii + jj
pcpexpi = pcpexpi + weight[jj] * pcpevapo[imj]
if beta!=0 and jj > 0:
pcpexpi = pcpexpi + beta * weight[jj] * pcpevapo[ipj]
pcpexp.append(pcpexpi)
return pcpexp
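# Minimal usage sketch for hybrid_index (not part of the original code): the
# synthetic series and parameter values below are illustrative assumptions only.
def _example_hybrid_index():
    rng = np.random.default_rng(0)
    pcp = rng.standard_normal(600)   # e.g. 50 years of monthly precipitation anomalies
    evapo = np.zeros_like(pcp)       # ignored when alpha == 0
    # 1-sided, precipitation-only index with a 6-month e-folding time
    return hybrid_index(pcp, evapo, tau=6, lagmax=18, alpha=0, beta=0)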
def cross_corr(x, y, nlags, bad_flag):
""" calculate cross correlations
[lag, R_xy, P_xy] = cross_corr(x, y, nlags, bad_flag)
input:
x,y the data sets
nlags are number of lags to process
bad_flag is data value that indicates missing or bad data
output:
lag is lag from -nlag to +nlag
R_xy are covariances.
P_xy are correlations
Subroutine computes correlation between a(t) and b(t+lag). A positive
lag therefore means that a(t) precedes b(t). In other words a(t) leads
b(t).
"""
N = len(x)
# initialize output
R_xy = []
P_xy = []
if np.isnan(bad_flag):
bad_flag=1e35
x[np.isnan(x)]= bad_flag
y[np.isnan(y)]= bad_flag
# do the lags
cnt = -1
for ll in range(-nlags,nlags+1):
cnt = cnt + 1
# check for neg./pos lag
if ll < 0:
k = -1 * ll
lag2_id = np.arange(0,N-k)
lag1_id = lag2_id + k
else:
k = ll
lag1_id = np.arange(0,N-k)
lag2_id = lag1_id + k
# find good data in x series
good_id = x[lag1_id]!=bad_flag
Ngoodx = sum(good_id)
# continue with this lag if there are data
if Ngoodx>0:
lag1_id = lag1_id[good_id]
lag2_id = lag2_id[good_id]
# find good data in y-series where x series was good
good_id = y[lag2_id]!=bad_flag
Ngood = sum(good_id)
# continue only if there are data
if Ngood>0:
lag1_id = lag1_id[good_id]
lag2_id = lag2_id[good_id]
mean_1 = np.mean(x[lag1_id])
mean_2 = np.mean(y[lag2_id])
z1=x[lag1_id]-mean_1
z2=y[lag2_id]-mean_2
# get the normalizing variances
std_1 = np.sqrt(np.dot(z1, z1) / len(lag2_id))
std_2 = np.sqrt(np.dot(z2, z2) / len(lag2_id))
# estimate cov. and corr.
R_xy.append(np.dot(z1, z2))
# -*- coding: utf-8 -*-
from unittest import TestCase
from unittest.mock import MagicMock, call, patch
import numpy as np
from btb.tuning.tunable import Tunable
from btb.tuning.tuners.base import BaseMetaModelTuner, BaseTuner, StopTuning
class TestBaseTuner(TestCase):
"""Test BaseTuner class."""
def test___init__defaults(self):
# setup
tunable = MagicMock(spec_set=Tunable)
# run
instance = BaseTuner(tunable)
# assert
assert instance.tunable is tunable
assert isinstance(instance.trials, np.ndarray)
assert isinstance(instance.raw_scores, np.ndarray)
assert isinstance(instance.scores, np.ndarray)
assert isinstance(instance._trials_set, set)
assert isinstance(instance.maximize, bool)
assert instance.maximize
assert instance.trials.shape == (0, 1)
assert instance.raw_scores.shape == (0, 1)
assert instance.trials.dtype == np.float64
assert instance.raw_scores.dtype == np.float64
def test___init__maximize_false(self):
# setup
tunable = MagicMock(spec_set=Tunable)
# run
instance = BaseTuner(tunable, False)
# assert
assert isinstance(instance.tunable, MagicMock)
assert isinstance(instance.trials, np.ndarray)
assert isinstance(instance.raw_scores, np.ndarray)
assert isinstance(instance.scores, np.ndarray)
assert isinstance(instance._trials_set, set)
assert isinstance(instance.maximize, bool)
assert not instance.maximize
def test__check_proposals_proposals_gt_cardinality(self):
"""Test that ``StopTuning`` is being raised if ``proposals`` is greater than
``self.tunable.cardinality``.
"""
# setup
instance = MagicMock()
instance.tunable = MagicMock(spec_set=Tunable)
instance.tunable.cardinality = 4
# run / assert
with self.assertRaises(StopTuning):
BaseTuner._check_proposals(instance, 5)
def test__check_proposals_trials_eq_cardinality(self):
"""Test that ``StopTuning`` is being raised if ``self.trials`` is equal to
``self.tunable.cardinality``.
"""
# setup
instance = MagicMock()
instance.tunable = MagicMock(spec_set=Tunable)
instance.tunable.cardinality = 2
instance._trials_set.__len__.return_value = 2
# run / assert
with self.assertRaises(StopTuning):
BaseTuner._check_proposals(instance, 1)
def test__check_proposals_trials_and_proposals_gt_cardinality(self):
"""Test that ``StopTuning`` is being raised if ``proposals`` + ``len(self.trials)``
is greater than ``self.tunable.cardinality``.
"""
# setup
instance = MagicMock()
instance.tunable = MagicMock(spec_set=Tunable)
instance.tunable.cardinality = 4
instance._trials_set.__len__.return_value = 2
# run / assert
with self.assertRaises(StopTuning):
BaseTuner._check_proposals(instance, 3)
def test__check_proposals_not_raise(self):
"""Test that ``StopTuning`` is not being raised."""
# setup
instance = MagicMock()
instance.tunable = MagicMock(spec_set=Tunable)
instance.tunable.cardinality = 4
instance._trials_set.__len__.return_value = 2
# run / assert
result = BaseTuner._check_proposals(instance, 1)
assert result is None
def test_propose_one_value_no_duplicates(self):
"""Test that ``propose`` method calls it's child implemented method with
``allow_duplicates`` as ``False``.
"""
# setup
instance = MagicMock()
instance.tunable = MagicMock(spec_set=Tunable)
inverse_return = instance.tunable.inverse_transform.return_value
inverse_return.to_dict.return_value = [1]
instance._propose = MagicMock(return_value=1)
# run
result = BaseTuner.propose(instance, 1)
# assert
instance._check_proposals.assert_called_once_with(1)
instance._propose.assert_called_once_with(1, False)
instance.tunable.inverse_transform.assert_called_once_with(1)
inverse_return.to_dict.assert_called_once_with(orient='records')
assert result == 1
@patch('btb.tuning.tuners.base.BaseTuner._check_proposals')
def test_propose_one_value_allow_duplicates(self, mock__check_proposals):
"""Test that ``propose`` method calls it's child implemented method with
``allow_duplicates`` as ``True``.
"""
# setup
instance = MagicMock()
instance.tunable = MagicMock(spec_set=Tunable)
inverse_return = instance.tunable.inverse_transform.return_value
inverse_return.to_dict.return_value = [1]
instance._propose = MagicMock(return_value=1)
# run
result = BaseTuner.propose(instance, 1, allow_duplicates=True)
# assert
instance._check_proposals.assert_not_called()
instance._propose.assert_called_once_with(1, True)
instance.tunable.inverse_transform.assert_called_once_with(1)
inverse_return.to_dict.assert_called_once_with(orient='records')
assert result == 1
@patch('btb.tuning.tuners.base.BaseTuner._check_proposals')
def test_propose_many_values_no_duplicates(self, mock__check_proposals):
"""Test that ``propose`` method calls it's child implemented method with more than one
proposals and ``allow_duplicates`` as ``False``.
"""
# setup
instance = MagicMock()
instance.tunable = MagicMock(spec_set=Tunable)
inverse_return = instance.tunable.inverse_transform.return_value
inverse_return.to_dict.return_value = [1, 2]
instance._propose = MagicMock(return_value=2)
# run
result = BaseTuner.propose(instance, 2)
# assert
instance._propose.assert_called_once_with(2, False)
instance.tunable.inverse_transform.assert_called_once_with(2)
inverse_return.to_dict.assert_called_once_with(orient='records')
assert result == [1, 2]
@patch('btb.tuning.tuners.base.BaseTuner._check_proposals')
def test_propose_many_values_allow_duplicates(self, mock__check_proposals):
"""Test that ``propose`` method calls it's child implemented method with more than one
proposals and ``allow_duplicates`` as ``True``.
"""
# setup
instance = MagicMock()
instance.tunable = MagicMock(spec_set=Tunable)
inverse_return = instance.tunable.inverse_transform.return_value
inverse_return.to_dict.return_value = [1, 2]
instance._propose = MagicMock(return_value=2)
# run
result = BaseTuner.propose(instance, 2, allow_duplicates=True)
# assert
instance._propose.assert_called_once_with(2, True)
instance.tunable.inverse_transform.called_once_with(2)
inverse_return.to_dict.assert_called_once_with(orient='records')
assert result == [1, 2]
def test__sample_allow_duplicates(self):
"""Test the method ``_sample``when using duplicates."""
# setup
instance = MagicMock()
instance.tunable = MagicMock(spec_set=Tunable)
instance.tunable.sample.return_value = 1
# run
result = BaseTuner._sample(instance, 1, True)
# assert
instance.tunable.sample.assert_called_once_with(1)
assert result == 1
def test__sample_not_allow_duplicates(self):
"""Test that the method ``_sample`` returns ``np.ndarray`` when not using duplicates."""
# setup
instance = MagicMock()
instance._trials_set = set()
instance.tunable = MagicMock(spec_set=Tunable)
instance.tunable.sample.return_value = np.array([[3]])
# run
result = BaseTuner._sample(instance, 1, False)
# assert
instance.tunable.sample.assert_called_once_with(1)
np.testing.assert_array_equal(result, np.array([[3]]))
def test_sample_no_duplicates_more_than_one_loop(self):
"""Test that the method ``_sample`` returns ``np.ndarray`` when not using duplicates and
performs more than one iteration.
"""
# setup
instance = MagicMock()
instance.tunable = MagicMock(spec_set=Tunable)
instance._trials_set = set({(1, ), (2, )})
side_effect = [np.array([[3]]), np.array([[1]]), np.array([[1]])]
import numpy as np
from scipy.integrate import solve_ivp
from utils import reshape_pt1, reshape_pt1_tonormal, reshape_dim1
# Possible dynamics (take t,x,u and return dx/dt) and solver for testing
# Input x, u, version and parameters, output x at the next step (dt
# later) with scipy ODE solver
def dynamics_traj(x0, u, t0, dt, init_control, discrete=False, version=None,
meas_noise_var=0, process_noise_var=0, method='RK45',
t_span=[0, 1], t_eval=[0.1], solver_options={}, **kwargs):
x0 = reshape_pt1(x0)
if discrete:
xtraj = np.zeros((len(t_eval), x0.shape[1]))
xtraj[0] = reshape_pt1(x0)
t = t0
i = 0
while (i < len(t_eval) - 1) and (t < t_eval[-1]):
i += 1
xnext = reshape_pt1(
version(t, xtraj[i - 1], u, t0, init_control, process_noise_var,
**kwargs))
xtraj[i] = xnext
t += dt
else:
sol = solve_ivp(
lambda t, x: version(t, x, u, t0, init_control, process_noise_var,
**kwargs), t_span=t_span,
y0=reshape_pt1_tonormal(x0), method=method, t_eval=t_eval,
**solver_options)
xtraj = reshape_pt1(sol.y.T)
if meas_noise_var != 0:
xtraj += np.random.normal(0, np.sqrt(meas_noise_var), xtraj.shape)
return reshape_pt1(xtraj)
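# Minimal usage sketch for dynamics_traj (not part of the original code): the
# zero controller and linear test dynamics below are hypothetical stand-ins
# introduced here for illustration only.
def _example_dynamics_traj():
    def zero_controller(t, kwargs, t0, init_control):
        return np.zeros((1, 1))
    def linear_dynamics(t, x, u, t0, init_control, process_noise_var, **kwargs):
        return -np.array(x)  # dx/dt = -x
    x0 = np.array([[1., 0.5]])
    t_eval = np.linspace(0.1, 1., 10)
    return dynamics_traj(x0, zero_controller, t0=0., dt=0.1,
                         init_control=np.zeros((1, 1)), version=linear_dynamics,
                         t_span=[0., 1.], t_eval=t_eval)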
# Dynamics of the continuous time Duffing oscillator, with control law u(t)
def duffing_dynamics(t, x, u, t0, init_control, process_noise_var, **kwargs):
alpha = kwargs.get('alpha')
beta = kwargs.get('beta')
delta = kwargs.get('delta')
x = reshape_pt1(x)
u = reshape_pt1(u(t, kwargs, t0, init_control))
A = reshape_pt1([[0, 1], [-alpha, -delta]])
F1 = reshape_dim1(np.zeros_like(x[:, 0]))
"""
Test for file IO
"""
import importlib.util
from pathlib import Path
import numpy as np
from biorbd_optim import Data
from .utils import TestUtils
# Load static_arm
PROJECT_FOLDER = Path(__file__).parent / ".."
spec = importlib.util.spec_from_file_location(
"static_arm", str(PROJECT_FOLDER) + "/examples/muscle_driven_ocp/static_arm.py",
)
static_arm = importlib.util.module_from_spec(spec)
spec.loader.exec_module(static_arm)
# Load static_arm_with_contact
spec = importlib.util.spec_from_file_location(
"static_arm_with_contact", str(PROJECT_FOLDER) + "/examples/muscle_driven_ocp/static_arm_with_contact.py",
)
static_arm_with_contact = importlib.util.module_from_spec(spec)
spec.loader.exec_module(static_arm_with_contact)
# Load contact_forces_inequality_constraint_muscle_excitations
spec = importlib.util.spec_from_file_location(
"contact_forces_inequality_constraint_muscle_excitations",
str(PROJECT_FOLDER)
+ "/examples/muscle_driven_with_contact/contact_forces_inequality_constraint_muscle_excitations.py",
)
contact_forces_inequality_constraint_muscle_excitations = importlib.util.module_from_spec(spec)
spec.loader.exec_module(contact_forces_inequality_constraint_muscle_excitations)
def test_muscle_driven_ocp():
ocp = static_arm.prepare_ocp(
str(PROJECT_FOLDER) + "/examples/muscle_driven_ocp/arm26.bioMod", final_time=2, number_shooting_points=10
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol["f"])
np.testing.assert_equal(f.shape, (1, 1))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mdtraj as md
import numpy as np
import sys
def get_angles_dcd(fname1):
traj = md.load_dcd(fname1, top = 'ala2.pdb')
atoms, bonds = traj.topology.to_dataframe()
psi_indices, phi_indices = [6, 8, 14, 16], [4, 6, 8, 14]
arr_angles = md.compute_dihedrals(traj, [phi_indices, psi_indices])
return arr_angles
def get_angles_xtc(fname1):
traj = md.load_xtc(fname1, top = 'ala2.pdb')
atoms, bonds = traj.topology.to_dataframe()
psi_indices, phi_indices = [6, 8, 14, 16], [4, 6, 8, 14]
arr_angles = md.compute_dihedrals(traj, [phi_indices, psi_indices])
return arr_angles
if __name__ == "__main__":
argvs = sys.argv
fname1_list = ['../trajectory-'+str(i)+'.dcd' for i in range(2, 9)]
fname2_list = ['../trajectory-'+str(g)+'.xtc' for g in range(1, 6)]
angles = get_angles_dcd('../trajectory-1.dcd')
for fname1 in fname1_list:
angles_new = get_angles_dcd(fname1)
angles = np.concatenate([angles, angles_new], axis=0)
for fname2 in fname2_list:
angles_new = get_angles_xtc(fname2)
angles = np.concatenate([angles, angles_new], axis=0)
print(angles.shape) ##(80000, 2)
angles = np.delete(angles, 0, 0)
angles = np.delete(angles, 1, 0)
angles = np.delete(angles, 2, 0)
angles = np.delete(angles, 3, 0)
"""Film Mode Matching Mode Solver
Implementation of the Film Mode Matching (FMM) algorithm, as described in:
- Sudbo, "Film mode matching a versatile numerical method for vector mode field calculations in dielectric waveguides", Pure App. Optics, 2 (1993), 211-233
- Sudbo, "Improved formulation of the film mode matching method for mode field calculations in dielectric waveguides", Pure App. Optics, 3 (1994), 381-388
Examples
========
See L{FMM1d} and L{FMM2d}.
"""
from __future__ import print_function
from builtins import zip
from builtins import range
from builtins import object
from functools import reduce
__author__ = '<NAME> & <NAME>'
import numpy
import scipy
import scipy.optimize
import copy
import EMpy.utils
from EMpy.modesolvers.interface import *
import pylab
class Message(object):
def __init__(self, msg, verbosity=0):
self.msg = msg
self.verbosity = verbosity
def show(self, verbosity=0):
if self.verbosity <= verbosity:
print((self.verbosity - 1) * '\t' + self.msg)
class Struct(object):
"""Empty class to fill with whatever I want. Maybe a dictionary would do?"""
pass
class Boundary(object):
"""Boundary conditions.
Electric and Magnetic boundary conditions are translated to Symmetric
and Antisymmetric for each field.
@ivar xleft: Left bc on x.
@ivar xright: Right bc on x.
@ivar yleft: Left bc on y.
@ivar yright: Right bc on y.
"""
def __init__(self, xleft='Electric Wall',
yleft='Magnetic Wall',
xright='Electric Wall',
yright='Magnetic Wall'):
"""Set the boundary conditions, validate and translate."""
self.xleft = xleft
self.yleft = yleft
self.xright = xright
self.yright = yright
self.validate()
self.translate()
def validate(self):
"""Validate the input.
@raise ValueError: Unknown boundary.
"""
if not reduce(lambda x, y: x & y,
[(x == 'Electric Wall') | (x == 'Magnetic Wall') for x in [self.xleft, self.yleft, self.xright, self.yright]]):
raise ValueError('Unknown boundary.')
def translate(self):
"""Translate for each field.
@raise ValueError: Unknown boundary.
"""
self.xh = ''
self.xe = ''
self.yh = ''
self.ye = ''
if self.xleft == 'Electric Wall':
self.xh += 'A'
self.xe += 'S'
elif self.xleft == 'Magnetic Wall':
self.xh += 'S'
self.xe += 'A'
else:
raise ValueError('Unknown boundary.')
if self.xright == 'Electric Wall':
self.xh += 'A'
self.xe += 'S'
elif self.xright == 'Magnetic Wall':
self.xh += 'S'
self.xe += 'A'
else:
raise ValueError('Unknown boundary.')
if self.yleft == 'Electric Wall':
self.yh += 'A'
self.ye += 'S'
elif self.yleft == 'Magnetic Wall':
self.yh += 'S'
self.ye += 'A'
else:
raise ValueError('Unknown boundary.')
if self.yright == 'Electric Wall':
self.yh += 'A'
self.ye += 'S'
elif self.yright == 'Magnetic Wall':
self.yh += 'S'
self.ye += 'A'
else:
raise ValueError('Unknown boundary.')
def __str__(self):
return 'xleft = %s, xright = %s, yleft = %s, yright = %s' % (self.xleft, self.xright, self.yleft, self.yright)
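# Minimal usage sketch for Boundary (not part of the original code; the default
# walls below are illustrative only).
def _example_boundary():
    bc = Boundary()  # Electric walls in x, Magnetic walls in y (the defaults)
    # translated per-field symmetries: xh='AA', xe='SS', yh='SS', ye='AA'
    return bc.xh, bc.xe, bc.yh, bc.ye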
class Slice(object):
"""One dimensional arrangement of layers and 1d modes.
A slice is made of a stack of layers, i.e. refractive indeces with a thickness,
with given boundary conditions.
It holds 1d modes, both TE and TM.
@ivar x1: start point of the slice in x.
@ivar x2: end point of the slice in x.
@ivar Uy: array of points delimiting the layers.
@ivar boundary: boundary conditions.
@ivar modie: E modes.
@ivar modih: H modes.
@ivar Ux: array of points delimiting the slices in x (internally set).
@ivar refractiveindex: refractive index of all the slices (internally set).
@ivar epsilon: epsilon of all the slices (internally set).
@ivar wl: vacuum wavelength.
"""
def __init__(self, x1, x2, Uy, boundary, modie, modih):
self.x1 = x1
self.x2 = x2
self.Uy = Uy
self.boundary = boundary
self.modie = modie
self.modih = modih
def __str__(self):
return 'x1 = %g, x2 = %g\nUy = %s\nboundary = %s' % (self.x1, self.x2, self.Uy, self.boundary)
class FMMMode1d(Mode):
"""One dimensional mode.
Note
====
Virtual class.
"""
pass
class FMMMode1dx(FMMMode1d):
"""Matching coefficients in the x-direction.
L{FMMMode1dy}s are weighted by these coefficients to assure continuity.
"""
def __str__(self):
return 'sl = %s\nsr = %s\nal = %s\nar = %s\nk = %s\nU = %s' % \
(self.sl.__str__(),
self.sr.__str__(),
self.al.__str__(),
self.ar.__str__(),
self.k.__str__(),
self.U.__str__())
class FMMMode1dy(FMMMode1d):
"""One dimensional mode.
It holds the coefficients that describe the mode in the FMM expansion.
Note
====
The mode is suppose one dimensional, in the y direction.
@ivar sl: array of value of the mode at the lhs of each slice.
@ivar sr: array of value of the mode at the rhs of each slice.
@ivar al: array of value of the derivative of the mode at the lhs of each slice.
@ivar ar: array of value of the derivative of the mode at the lhs of each slice.
@ivar k: wavevector inside each layer.
@ivar keff: effective wavevector.
@ivar zero: how good the mode is? it must be as close to zero as possible!
@ivar Uy: array of points delimiting the layers.
"""
def eval(self, y_):
"""Evaluate the mode at y."""
y = numpy.atleast_1d(y_)
ny = len(y)
f = numpy.zeros(ny, dtype=complex)
for iU in range(len(self.U) - 1):
k = self.k[iU]
sl = self.sl[iU]
al = self.al[iU]
Ul = self.U[iU]
Ur = self.U[iU+1]
idx = numpy.where((Ul <= y) & (y <= Ur))
yy = y[idx] - Ul
f[idx] = sl * numpy.cos(k * yy) + al * sinxsux(k * yy) * yy
return f
def plot(self, y):
f = self.eval(y)
pylab.plot(y, numpy.real(f), y, numpy.imag(f))
pylab.legend(('real', 'imag'))
pylab.xlabel('y')
pylab.ylabel('mode1d')
pylab.show()
def __str__(self):
return 'sl = %s\nsr = %s\nal = %s\nar = %s\nk = %s\nkeff = %s\nzero = %s\nU = %s' % \
(self.sl.__str__(),
self.sr.__str__(),
self.al.__str__(),
self.ar.__str__(),
self.k.__str__(),
self.keff.__str__(),
self.zero.__str__(),
self.U.__str__())
class FMMMode2d(Mode):
"""Two dimensional mode.
It holds the coefficients that describe the mode in the FMM expansion.
"""
def get_x(self, n=100):
return numpy.linspace(self.slicesx[0].Ux[0], self.slicesx[0].Ux[-1], n)
def get_y(self, n=100):
return numpy.linspace(self.slicesx[0].Uy[0], self.slicesx[0].Uy[-1], n)
def eval(self, x_=None, y_=None):
"""Evaluate the mode at x,y."""
if x_ is None:
x = self.get_x()
else:
x = numpy.atleast_1d(x_)
if y_ is None:
y = self.get_y()
else:
y = numpy.atleast_1d(y_)
nmodi = len(self.modie)
lenx = len(x)
leny = len(y)
k0 = 2. * numpy.pi / self.slicesx[0].wl
kz = self.keff
uh = numpy.zeros((nmodi, lenx), dtype=complex)
ue = numpy.zeros_like(uh)
udoth = numpy.zeros_like(uh)
udote = numpy.zeros_like(uh)
Exsh = numpy.zeros((leny, nmodi), dtype=complex)
Exah = numpy.zeros_like(Exsh)
Exse = numpy.zeros_like(Exsh)
Exae = numpy.zeros_like(Exsh)
Eysh = numpy.zeros_like(Exsh)
Eyah = numpy.zeros_like(Exsh)
Eyse = numpy.zeros_like(Exsh)
Eyae = numpy.zeros_like(Exsh)
Ezsh = numpy.zeros_like(Exsh)
Ezah = numpy.zeros_like(Exsh)
Ezse = numpy.zeros_like(Exsh)
Ezae = numpy.zeros_like(Exsh)
cBxsh = numpy.zeros_like(Exsh)
cBxah = numpy.zeros_like(Exsh)
cBxse = numpy.zeros_like(Exsh)
cBxae = numpy.zeros_like(Exsh)
cBysh = numpy.zeros_like(Exsh)
cByah = numpy.zeros_like(Exsh)
cByse = numpy.zeros_like(Exsh)
cByae = numpy.zeros_like(Exsh)
cBzsh = numpy.zeros_like(Exsh)
cBzah = numpy.zeros_like(Exsh)
cBzse = numpy.zeros_like(Exsh)
cBzae = numpy.zeros_like(Exsh)
ExTE = numpy.zeros((leny,lenx), dtype=complex)
EyTE = numpy.zeros_like(ExTE)
"""This module contains helper functions and utilities for nelpy."""
__all__ = ['spatial_information',
'frange',
'swap_cols',
'swap_rows',
'pairwise',
'is_sorted',
'linear_merge',
'PrettyDuration',
'ddt_asa',
'get_contiguous_segments',
'get_events_boundaries',
'get_threshold_crossing_epochs',
'_bst_get_bins']
import numpy as np
import logging
from itertools import tee, repeat
from collections import namedtuple
from math import floor
from scipy.signal import hilbert
import scipy.ndimage.filters #import gaussian_filter1d, gaussian_filter
from numpy import log, ceil
import copy
import sys
import ctypes
from multiprocessing import Array, cpu_count
from multiprocessing.pool import Pool
import pdb
from . import core # so that core.RegularlySampledAnalogSignalArray is exposed
from . import auxiliary # so that auxiliary.TuningCurve1D is epxosed
from . import filtering
from .utils_.decorators import keyword_deprecation
# def sub2ind(array_shape, rows, cols):
# ind = rows*array_shape[1] + cols
# ind[ind < 0] = -1
# ind[ind >= array_shape[0]*array_shape[1]] = -1
# return ind
# def ind2sub(array_shape, ind):
# # see also np.unravel_index(ind, array.shape)
# ind[ind < 0] = -1
# ind[ind >= array_shape[0]*array_shape[1]] = -1
# rows = (ind.astype('int') / array_shape[1])
# cols = ind % array_shape[1]
# return (rows, cols)
def ragged_array(arr):
"""Takes a list of arrays, and returns a ragged array.
See https://github.com/numpy/numpy/issues/12468
"""
n_elem = len(arr)
out = np.array(n_elem*[None])
for ii in range(out.shape[0]):
out[ii] = arr[ii]
return out
def asa_indices_within_epochs(asa, intervalarray):
"""Return indices of ASA within epochs.
[[start, stop]
...
[start, stop]]
so that data can be associated with asa._data[:,start:stop] for each epoch.
"""
indices = []
intervalarray = intervalarray[asa.support]
for interval in intervalarray.merge().data:
a_start = interval[0]
a_stop = interval[1]
frm, to = np.searchsorted(asa._abscissa_vals, (a_start, a_stop))
indices.append((frm, to))
indices = np.array(indices, ndmin=2)
return indices
def frange(start, stop, step):
"""arange with floating point step"""
# TODO: this function is not very general; we can extend it to work
# for reverse (stop < start), empty, and default args, etc.
# there are also many edge cases where this is weird.
# see https://stackoverflow.com/questions/7267226/range-for-floats
# for better alternatives.
num_steps = int(np.floor((stop-start)/step))
return np.linspace(start, stop, num=num_steps, endpoint=False)
def spatial_information(ratemap):
"""Compute the spatial information and firing sparsity...
The specificity index examines the amount of information
(in bits) that a single spike conveys about the animal's
location (i.e., how well cell firing predicts the animal's
location).The spatial information content of cell discharge was
calculated using the formula:
information content = \Sum P_i(R_i/R)log_2(R_i/R)
where i is the bin number, P_i, is the probability for occupancy
of bin i, R_i, is the mean firing rate for bin i, and R is the
overall mean firing rate.
In order to account for the effects of low firing rates (with
fewer spikes there is a tendency toward higher information
content) or random bursts of firing, the spike firing
time-series was randomly offset in time from the rat location
time-series, and the information content was calculated. A
distribution of the information content based on 100 such random
shifts was obtained and was used to compute a standardized score
(Zscore) of information content for that cell. While the
distribution is not composed of independent samples, it was
nominally normally distributed, and a Z value of 2.29 was chosen
as a cut-off for significance (the equivalent of a one-tailed
t-test with P = 0.01 under a normal distribution).
Reference(s)
------------
<NAME>., <NAME>., <NAME>., <NAME>.,
and <NAME>. (1994). "Spatial information content and
reliability of hippocampal CA1 neurons: effects of visual
input", Hippocampus, 4(4), 410-421.
Parameters
----------
ratemap : array of shape (n_units, n_bins)
Rate map in Hz.
Returns
-------
si : array of shape (n_units,)
spatial information (in bits) per unit
"""
ratemap = copy.deepcopy(ratemap)
# ensure that the ratemap always has nonzero firing rates,
# otherwise the spatial information might return NaNs:
bkg_rate = ratemap[ratemap>0].min()
ratemap[ratemap < bkg_rate] = bkg_rate
number_of_spatial_bins = np.prod(ratemap.shape[1:])
weight_per_bin = 1/number_of_spatial_bins
Pi = 1
if len(ratemap.shape) == 3:
# we have 2D tuning curve, (n_units, n_x, n_y)
R = ratemap.mean(axis=1).mean(axis=1) # mean firing rate
Ri = np.transpose(ratemap, (2,1,0))
si = np.sum(np.sum((Pi*((Ri / R)*np.log2(Ri / R)).T), axis=1), axis=1)
elif len(ratemap.shape) == 2:
# we have 1D tuning curve, (n_units, n_x)
R = ratemap.mean(axis=1) # mean firing rate
Ri = ratemap.T
si = np.sum((Pi*((Ri / R)*np.log2(Ri / R)).T), axis=1)
else:
raise TypeError("rate map shape not supported / understood!")
return si/number_of_spatial_bins
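# Minimal usage sketch for spatial_information (not part of the original code;
# the synthetic 1D rate map below is illustrative only).
def _example_spatial_information():
    ratemap = np.array([[0.5, 0.5, 8.0, 0.5],    # place-field-like unit
                        [3.0, 3.0, 3.0, 3.0]])   # spatially uniform unit
    # the uniform unit carries 0 bits: R_i/R == 1 everywhere, so the log2 terms vanish
    return spatial_information(ratemap)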
def spatial_sparsity(ratemap):
"""Compute the firing sparsity...
The specificity index examines the amount of information
(in bits) that a single spike conveys about the animal's
location (i.e., how well cell firing predicts the animal's
location).The spatial information content of cell discharge was
calculated using the formula:
information content = sum_i P_i (R_i/R) log_2(R_i/R)
where i is the bin number, P_i, is the probability for occupancy
of bin i, R_i, is the mean firing rate for bin i, and R is the
overall mean firing rate.
In order to account for the effects of low firing rates (with
fewer spikes there is a tendency toward higher information
content) or random bursts of firing, the spike firing
time-series was randomly offset in time from the rat location
time-series, and the information content was calculated. A
distribution of the information content based on 100 such random
shifts was obtained and was used to compute a standardized score
(Zscore) of information content for that cell. While the
distribution is not composed of independent samples, it was
nominally normally distributed, and a Z value of 2.29 was chosen
as a cut-off for significance (the equivalent of a one-tailed
t-test with P = 0.01 under a normal distribution).
Reference(s)
------------
<NAME>., <NAME>., <NAME>., <NAME>.,
and <NAME>. (1994). "Spatial information content and
reliability of hippocampal CA1 neurons: effects of visual
input", Hippocampus, 4(4), 410-421.
Parameters
----------
ratemap : array of shape (n_units, n_bins)
Rate map in Hz.
Returns
-------
sparsity : array of shape (n_units,)
sparsity (in percent) for each unit
"""
number_of_spatial_bins = np.prod(ratemap.shape[1:])
weight_per_bin = 1/number_of_spatial_bins
Pi = 1
if len(ratemap.shape) == 3:
# we have 2D tuning curve, (n_units, n_x, n_y)
R = ratemap.mean(axis=1).mean(axis=1) # mean firing rate
Ri = ratemap
sparsity = np.sum(np.sum((Ri*Pi), axis=1), axis=1)/(R**2)
elif len(ratemap.shape) == 2:
# we have 1D tuning curve, (n_units, n_x)
R = ratemap.mean(axis=1) # mean firing rate
Ri = ratemap.T
sparsity = np.sum((Pi*Ri.T), axis=1)/(R**2)
else:
raise TypeError("rate map shape not supported / understood!")
return sparsity/number_of_spatial_bins
def _bst_get_bins_inside_interval(interval, ds, w=1):
"""(np.array) Return bin edges entirely contained inside an interval.
Bin edges always start at interval.start, and continue for as many
bins as would fit entirely inside the interval.
NOTE 1: there are (n+1) bin edges associated with n bins.
WARNING: if an interval is smaller than ds, then no bin will be
associated with the particular interval.
NOTE 2: nelpy uses half-open intervals [a,b), but if the bin
width divides b-a, then the bins will cover the entire
range. For example, if interval = [0,2) and ds = 1, then
bins = [0,1,2], even though [0,2] is not contained in
[0,2). There might be numerical precision deviations from this?
Parameters
----------
interval : EpochArray
EpochArray containing a single interval with a start, and stop
ds : float
Time bin width, in seconds.
w : number of bins to use in a sliding window mode. Default is 1 (no sliding window).
For example, 40 ms bins, with a stride of 5 ms, can be achieved by using (ds=0.005, w=8)
For now, w has to be an integer, and therefore 5 second bins, with a stride of 2 seconds
are not supported within this framework.
Returns
-------
bins : array
Bin edges in an array of shape (n+1,) where n is the number
of bins
centers : array
Bin centers in an array of shape (n,) where n is the number
of bins
"""
if interval.length < ds:
return None, None
n_bins = int(np.floor(interval.length / ds)) # number of bins
# linspace is better than arange for non-integral steps
bins = np.linspace(interval.start, interval.start + n_bins*ds, n_bins+1)
if w > 1:
wn_bins = np.max((1, n_bins - w + 1))
wn_bins = bins[:wn_bins+1] + w/2*ds - ds/2
bins = wn_bins
centers = bins[:-1] + (ds / 2)
return bins, centers
def _bst_get_bins(intervalArray, ds, w=1):
"""
Compute bin edges, bin centers, and the binned support for every interval
in ``intervalArray``, using only bins that are contained wholly inside each
interval (bin width ``ds``, sliding-window factor ``w``).
"""
b = [] # bin list
c = [] # centers list
left_edges = []
right_edges = []
counter = 0
for interval in intervalArray:
bins, centers = _bst_get_bins_inside_interval(interval=interval, ds=ds, w=w)
if bins is not None:
left_edges.append(counter)
counter += len(centers) - 1
right_edges.append(counter)
counter += 1
b.extend(bins.tolist())
c.extend(centers.tolist())
bins = np.array(b)
bin_centers = np.array(c)
le = np.array(left_edges)
le = le[:, np.newaxis]
re = np.array(right_edges)
re = re[:, np.newaxis]
binned_support = np.hstack((le, re))
lengths = np.atleast_1d((binned_support[:,1] - binned_support[:,0] + 1).squeeze())
support_starts = bins[np.insert(np.cumsum(lengths+1),0,0)[:-1]]
support_stops = bins[np.insert(np.cumsum(lengths+1)-1,0,0)[1:]]
supportdata = np.vstack([support_starts, support_stops]).T
support = type(intervalArray)(supportdata) # set support to TRUE bin support
return bins, bin_centers, binned_support, support
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def get_mua(st, ds=None, sigma=None, truncate=None, _fast=True):
"""Compute the multiunit activity (MUA) from a spike train.
Parameters
----------
st : SpikeTrainArray
SpikeTrainArray containing one or more units.
-- OR --
st : BinnedSpikeTrainArray
BinnedSpikeTrainArray containing multiunit activity.
ds : float, optional
Time step in which to bin spikes. Default is 1 ms.
sigma : float, optional
Standard deviation (in seconds) of Gaussian smoothing kernel.
Default is 10 ms. If sigma==0 then no smoothing is applied.
truncate : float, optional
Bandwidth of the Gaussian filter. Default is 6.
Returns
-------
mua : AnalogSignalArray
AnalogSignalArray with MUA.
"""
if ds is None:
ds = 0.001 # 1 ms bin size
if sigma is None:
sigma = 0.01 # 10 ms standard deviation
if truncate is None:
truncate = 6
if isinstance(st, core.EventArray):
# bin spikes, so that we can count the spikes
mua_binned = st.bin(ds=ds).flatten()
elif isinstance(st, core.BinnedEventArray):
mua_binned = st.flatten()
ds = mua_binned.ds
else:
raise TypeError('st has to be one of (SpikeTrainArray, BinnedSpikeTrainArray)')
# make sure data type is float, so that smoothing works, and convert to rate
mua_binned._data = mua_binned._data.astype(float) / ds
# TODO: now that we can simply cast from BST to ASA and back, the following logic could be simplified:
# put mua rate inside an AnalogSignalArray
if _fast:
mua = core.AnalogSignalArray([], empty=True)
mua._data = mua_binned.data
mua._abscissa_vals = mua_binned.bin_centers
mua._abscissa.support = mua_binned.support
else:
mua = core.AnalogSignalArray(mua_binned.data, timestamps=mua_binned.bin_centers, fs=1/ds)
mua._fs = 1/ds
if (sigma != 0) and (truncate > 0):
mua = gaussian_filter(mua, sigma=sigma, truncate=truncate)
return mua
def is_odd(n):
"""Returns True if n is odd, and False if n is even.
Assumes integer.
"""
return bool(n & 1)
def swap_cols(arr, frm, to):
"""swap columns of a 2D np.array"""
if arr.ndim > 1:
arr[:,[frm, to]] = arr[:,[to, frm]]
else:
arr[frm], arr[to] = arr[to], arr[frm]
def swap_rows(arr, frm, to):
"""swap rows of a 2D np.array"""
if arr.ndim > 1:
arr[[frm, to],:] = arr[[to, frm],:]
else:
arr[frm], arr[to] = arr[to], arr[frm]
def pairwise(iterable):
"""returns a zip of all neighboring pairs.
This is used as a helper function for is_sorted.
Example
-------
>>> mylist = [2, 3, 6, 8, 7]
>>> list(pairwise(mylist))
[(2, 3), (3, 6), (6, 8), (8, 7)]
"""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def argsort(seq):
# http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python
return sorted(range(len(seq)), key=seq.__getitem__)
def is_sorted_general(iterable, key=lambda a, b: a <= b):
"""Check to see if iterable is monotonic increasing (sorted)."""
return all(key(a, b) for a, b in pairwise(iterable))
def is_sorted(x, chunk_size=None):
"""Returns True if iterable is monotonic increasing (sorted).
NOTE: intended for 1D array, list or tuple. Will not work on
more than 1D
This function works in-core with memory footprint XXX.
chunk_size = 100000 is probably a good choice.
"""
if not isinstance(x, (tuple, list, np.ndarray)):
raise TypeError("Unsupported type {}".format(type(x)))
x = np.atleast_1d(np.array(x).squeeze())
if x.ndim > 1:
raise ValueError("Input x must be 1-dimensional")
if chunk_size is None:
chunk_size = 500000
stop = x.size
for chunk_start in range(0, stop, chunk_size):
chunk_stop = int(min(stop, chunk_start + chunk_size + 1))
chunk = x[chunk_start:chunk_stop]
if not np.all(chunk[:-1] <= chunk[1:]):
return False
return True
def linear_merge(list1, list2):
"""Merge two SORTED lists in linear time.
UPDATED TO WORK WITH PYTHON 3.7+ (see https://stackoverflow.com/questions/51700960/runtimeerror-generator-raised-stopiteration-every-time-i-try-to-run-app)
Returns a generator of the merged result.
Examples
--------
>>> a = [1, 3, 5, 7]
>>> b = [2, 4, 6, 8]
>>> [i for i in linear_merge(a, b)]
[1, 2, 3, 4, 5, 6, 7, 8]
>>> [i for i in linear_merge(b, a)]
[1, 2, 3, 4, 5, 6, 7, 8]
>>> a = [1, 2, 2, 3]
>>> b = [2, 2, 4, 4]
>>> [i for i in linear_merge(a, b)]
[1, 2, 2, 2, 2, 3, 4, 4]
"""
# if any of the lists are empty, return the other (possibly also
# empty) list: (this is necessary because having either list1 or
# list2 be empty makes this quite a bit more complicated...)
if isinstance(list1, (list, np.ndarray)):
if len(list1) == 0:
list2 = iter(list2)
while True:
try:
yield next(list2)
except StopIteration:
return
if isinstance(list2, (list, np.ndarray)):
if len(list2) == 0:
list1 = iter(list1)
while True:
try:
yield next(list1)
except StopIteration:
return
list1 = iter(list1)
list2 = iter(list2)
value1 = next(list1)
value2 = next(list2)
# We'll normally exit this loop from a next() call raising
# StopIteration, which is how a generator function exits anyway.
while True:
if value1 <= value2:
# Yield the lower value.
try:
yield value1
except StopIteration:
return
try:
# Grab the next value from list1.
value1 = next(list1)
except StopIteration:
# list1 is empty. Yield the last value we received from list2, then
# yield the rest of list2.
try:
yield value2
except StopIteration:
return
while True:
try:
yield next(list2)
except StopIteration:
return
else:
try:
yield value2
except StopIteration:
return
try:
value2 = next(list2)
except StopIteration:
# list2 is empty.
try:
yield value1
except StopIteration:
return
while True:
try:
yield next(list1)
except StopIteration:
return
def get_mua_events(mua, fs=None, minLength=None, maxLength=None, PrimaryThreshold=None, minThresholdLength=None, SecondaryThreshold=None):
"""Determine MUA/PBEs from multiunit activity.
MUA : multiunit activity
PBE : population burst event
Parameters
----------
mua : AnalogSignalArray
AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz].
fs : float, optional
Sampling frequency of mua, in Hz. If not specified, it will be inferred from
mua.fs
minLength : float, optional
maxLength : float, optional
PrimaryThreshold : float, optional
SecondaryThreshold : float, optional
minThresholdLength : float, optional
Returns
-------
mua_epochs : EpochArray
EpochArray containing all the MUA events / PBEs.
Example
-------
mua = get_mua(spiketrain)
mua_epochs = get_mua_events(mua)
PBEs = get_PBEs(spiketrain, min_active=5)
= get_PBEs(get_mua_events(get_mua(*)), spiketrain, min_active=5)
"""
if fs is None:
fs = mua.fs
if fs is None:
raise ValueError("fs must either be specified, or must be contained in mua!")
if PrimaryThreshold is None:
PrimaryThreshold = mua.mean() + 3*mua.std()
if SecondaryThreshold is None:
SecondaryThreshold = mua.mean()
if minLength is None:
minLength = 0.050 # 50 ms minimum event duration
if maxLength is None:
maxLength = 0.750 # 750 ms maximum event duration
if minThresholdLength is None:
minThresholdLength = 0.0
# determine MUA event bounds:
mua_bounds_idx, maxes, _ = get_events_boundaries(
x = mua.data,
PrimaryThreshold = PrimaryThreshold,
SecondaryThreshold = SecondaryThreshold,
minThresholdLength = minThresholdLength,
minLength = minLength,
maxLength = maxLength,
ds = 1/fs
)
if len(mua_bounds_idx) == 0:
logging.warning("no mua events detected")
return core.EpochArray(empty=True)
# store MUA bounds in an EpochArray
mua_epochs = core.EpochArray(mua.time[mua_bounds_idx])
return mua_epochs
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def get_PBEs(data, fs=None, ds=None, sigma=None, truncate=None, unsorted_id=0,
min_active=None, minLength=None, maxLength=None,
PrimaryThreshold=None, minThresholdLength=None,
SecondaryThreshold=None):
"""Determine PBEs from multiunit activity or spike trains.
Definitions
-----------
MUA : multiunit activity
PBE : population burst event
Summary
-------
This function can be used to identify PBE epochs from spike trains, binned
spike trains, or multiunit activity (in the form of an AnalogSignalArray).
It is recommended to either pass in a SpikeTrainArray or a
BinnedSpikeTrainArray, so that a `min_active` number of sorted units can be
set.
It is also recommended that the unsorted units (but not noise artifacts!)
should be included in the spike train that is used to estimate the PBEs. By
default, unit_id=0 is assumed to be unsorted, but this can be changed, or if
no unsorted units are present, you can set unsorted_id=None. Equivalently,
if min_active=0, then no restriction will apply, and the unsorted_id will
have no effect on the final PBE epochs.
Examples
--------
PBE_epochs = get_PBEs(mua_asa)
PBE_epochs = get_PBEs(spiketrain, min_active=5)
PBE_epochs = get_PBEs(binnedspiketrain, min_active=5)
Parameters
----------
data : AnalogSignalArray
AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz].
-- OR --
data : SpikeTrainArray
SpikeTrainArray with multiple units, including unsorted unit(s), but
excluding any noise artifects.
-- OR --
data : BinnedSpikeTrainArray
BinnedSpikeTrainArray containing multiunit activity.
fs : float, optional
Sampling frequency of mua, in Hz. If not specified, it will be inferred
from data.
ds : float, optional
Time step in which to bin spikes. Default is 1 ms.
sigma : float, optional
Standard deviation (in seconds) of Gaussian smoothing kernel.
Default is 10 ms. If sigma==0 then no smoothing is applied.
truncate : float, optional
Bandwidth of the Gaussian filter. Default is 6.
unsorted_id : int, optional
unit_id of the unsorted unit. Default is 0. If no unsorted unit is
present, then set unsorted_id = None
min_active : int, optional
Minimum number of active units per event, excluding unsorted unit.
Default is 5.
minLength : float, optional
Minimum event duration in seconds. Default is 50 ms.
maxLength : float, optional
Maximum event duration in seconds. Default is 750 ms.
PrimaryThreshold : float, optional
Primary threshold to exceed. Default is mean() + 3*std()
SecondaryThreshold : float, optional
Secondary threshold to fall back to. Default is mean().
minThresholdLength : float, optional
Minimum duration to stay above PrimaryThreshold. Default is 0 ms.
Returns
-------
PBE_epochs : EpochArray
EpochArray containing all the PBEs.
Future improvements
-------------------
As of now, it is possible, but not easy to specify the Primary and Secondary
thresholds for event detection. A slight change in API might be needed to
make this specification more flexible.
"""
if sigma is None:
sigma = 0.01 # 10 ms standard deviation
if truncate is None:
truncate = 6
if isinstance(data, core.AnalogSignalArray):
# if we have only mua, then we cannot set (ds, unsorted_id, min_active)
if ds is not None:
raise ValueError('if data is an AnalogSignalArray then ds cannot be specified!')
if unsorted_id:
raise ValueError('if data is an AnalogSignalArray then unsorted_id cannot be specified!')
if min_active is not None:
raise ValueError('if data is an AnalogSignalArray then min_active cannot be specified!')
mua = data
mua._data = mua._data.astype(float)
if (sigma != 0) and (truncate > 0):
mua = gaussian_filter(mua, sigma=sigma, truncate=truncate)
elif isinstance(data, (core.EventArray, core.BinnedEventArray)):
# set default parameter values:
if ds is None:
ds = 0.001 # default 1 ms
if min_active is None:
min_active = 5
mua = get_mua(data, ds=ds, sigma=sigma, truncate=truncate, _fast=True)
else:
raise TypeError('data has to be one of (AnalogSignalArray, SpikeTrainArray, BinnedSpikeTrainArray)')
# set default parameter values:
if fs is None:
fs = mua.fs
if minLength is None:
minLength = 0.050 # 50 ms minimum event duration
if maxLength is None:
maxLength = 0.750 # 750 ms maximum event duration
if minThresholdLength is None:
minThresholdLength = 0.0
# if PrimaryThreshold is None:
# PrimaryThreshold =
# if SecondaryThreshold is None:
# SecondaryThreshold =
PBE_epochs = get_mua_events(mua=mua,
fs=fs,
minLength=minLength,
maxLength=maxLength,
PrimaryThreshold=PrimaryThreshold,
minThresholdLength=minThresholdLength,
SecondaryThreshold=SecondaryThreshold)
# now require min_active number of sorted cells
if isinstance(data, (core.EventArray, core.BinnedEventArray)):
if min_active > 0:
if unsorted_id is not None:
# remove unsorted unit, if present:
unit_ids = copy.deepcopy(data.unit_ids)
try:
unit_ids.remove(unsorted_id)
except ValueError:
pass
# data_ = data._unit_subset(unit_ids)
data_ = data.loc[:,unit_ids]
else:
data_ = data
# determine number of active units per epoch:
n_active = np.array([snippet.n_active for snippet in data_[PBE_epochs]])
active_epochs_idx = np.argwhere(n_active > min_active).squeeze()
# only keep those epochs where sufficiently many units are active:
PBE_epochs = PBE_epochs[active_epochs_idx]
return PBE_epochs
def get_contiguous_segments(data, *, step=None, assume_sorted=None,
in_core=True, index=False, inclusive=False,
fs=None, sort=None, in_memory=None):
"""Compute contiguous segments (seperated by step) in a list.
Note! This function requires that a sorted list is passed.
It first checks if the list is sorted O(n), and only sorts O(n log(n))
if necessary. But if you know that the list is already sorted,
you can pass assume_sorted=True, in which case it will skip
the O(n) check.
Returns an array of size (n_segments, 2), with each row
being of the form ([start, stop]) [inclusive, exclusive].
NOTE: when possible, use assume_sorted=True, and step=1 as explicit
arguments to function call.
WARNING! Step is robustly computed in-core (i.e., when in_core is
True), but is assumed to be 1 when out-of-core.
Example
-------
>>> data = [1,2,3,4,10,11,12]
>>> get_contiguous_segments(data)
([1,5], [10,13])
>>> get_contiguous_segments(data, index=True)
([0,4], [4,7])
Parameters
----------
data : array-like
1D array of sequential data, typically assumed to be integral (sample
numbers).
step : float, optional
Expected step size for neighboring samples. Default uses numpy to find
the median, but it is much faster and memory efficient to explicitly
pass in step=1.
assume_sorted : bool, optional
If assume_sorted == True, then data is not inspected or re-ordered. This
can be significantly faster, especially for out-of-core computation, but
it should only be used when you are confident that the data is indeed
sorted, otherwise the results from get_contiguous_segments will not be
reliable.
in_core : bool, optional
If True, then we use np.diff which requires all the data to fit
into memory simultaneously, otherwise we use groupby, which uses
a generator to process potentially much larger chunks of data,
but also much slower.
index : bool, optional
If True, the indices of segment boundaries will be returned. Otherwise,
the segment boundaries will be returned in terms of the data itself.
Default is False.
inclusive : bool, optional
If True, the boundaries are returned as [(inclusive idx, inclusive idx)]
Default is False, and can only be used when index==True.
Deprecated
----------
in_memory : bool, optional
This is equivalent to the new 'in-core'.
sort : bool, optional
This is equivalent to the new 'assume_sorted'
fs : sampling rate (Hz) used to extend half-open interval support by 1/fs
"""
# handle deprecated API calls:
if in_memory:
in_core = in_memory
logging.warning("'in_memory' has been deprecated; use 'in_core' instead")
if sort:
assume_sorted = sort
logging.warning("'sort' has been deprecated; use 'assume_sorted' instead")
if fs:
step = 1/fs
logging.warning("'fs' has been deprecated; use 'step' instead")
if inclusive:
assert index, "option 'inclusive' can only be used with 'index=True'"
if in_core:
data = np.asarray(data)
if not assume_sorted:
if not is_sorted(data):
data = np.sort(data) # algorithm assumes sorted list
if step is None:
step = np.median(np.diff(data))
# assuming that data(t1) is sampled somewhere on [t, t+1/fs) we have a 'continuous' signal as long as
# data(t2 = t1+1/fs) is sampled somewhere on [t+1/fs, t+2/fs). In the most extreme case, it could happen
# that t1 = t and t2 = t + 2/fs, i.e. a difference of 2 steps.
if np.any(np.diff(data) < step):
logging.warning("some steps in the data are smaller than the requested step size.")
breaks = np.argwhere(np.diff(data)>=2*step)
starts = np.insert(breaks+1, 0, 0)
stops = np.append(breaks, len(data)-1)
bdries = np.vstack((data[starts], data[stops] + step)).T
if index:
if inclusive:
indices = np.vstack((starts, stops)).T
else:
indices = np.vstack((starts, stops + 1)).T
return indices
else:
from itertools import groupby
from operator import itemgetter
if not assume_sorted:
if not is_sorted(data):
# data = np.sort(data) # algorithm assumes sorted list
raise NotImplementedError("out-of-core sorting has not been implemented yet...")
if step is None:
step = 1
bdries = []
if not index:
for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])):
f = itemgetter(1)
gen = (f(x) for x in g)
start = next(gen)
stop = start
for stop in gen:
pass
bdries.append([start, stop + step])
else:
counter = 0
for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])):
f = itemgetter(1)
gen = (f(x) for x in g)
_ = next(gen)
start = counter
stop = start
for _ in gen:
stop +=1
if inclusive:
bdries.append([start, stop])
else:
bdries.append([start, stop + 1])
counter = stop + 1
return np.asarray(bdries)
def get_direction(asa, *, sigma=None):
"""Return epochs during which an animal was running left to right, or right
to left.
Parameters
----------
asa : AnalogSignalArray 1D
AnalogSignalArray containing the 1D position data.
sigma : float, optional
Smoothing to apply to position (x) before computing gradient estimate.
Default is 0.
Returns
-------
l2r, r2l : EpochArrays
EpochArrays corresponding to left-to-right and right-to-left movement.
"""
if sigma is None:
sigma = 0
if not isinstance(asa, core.AnalogSignalArray):
raise TypeError('AnalogSignalArray expected!')
assert asa.n_signals == 1, "1D AnalogSignalArray expected!"
direction = dxdt_AnalogSignalArray(asa.smooth(sigma=sigma),
rectify=False).data
direction[direction>=0] = 1
direction[direction<0] = -1
direction = direction.squeeze()
l2r = get_contiguous_segments(np.argwhere(direction>0).squeeze(), step=1)
l2r[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
l2r = core.EpochArray(asa.abscissa_vals[l2r])
r2l = get_contiguous_segments(np.argwhere(direction<0).squeeze(), step=1)
r2l[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
r2l = core.EpochArray(asa.abscissa_vals[r2l])
return l2r, r2l
class PrettyBytes(int):
"""Prints number of bytes in a more readable format"""
def __init__(self, val):
self.val = val
def __str__(self):
if self.val < 1024:
return '{} bytes'.format(self.val)
elif self.val < 1024**2:
return '{:.3f} kilobytes'.format(self.val/1024)
elif self.val < 1024**3:
return '{:.3f} megabytes'.format(self.val/1024**2)
elif self.val < 1024**4:
return '{:.3f} gigabytes'.format(self.val/1024**3)
def __repr__(self):
return self.__str__()
class PrettyInt(int):
"""Prints integers in a more readable format"""
def __init__(self, val):
self.val = val
def __str__(self):
return '{:,}'.format(self.val)
def __repr__(self):
return '{:,}'.format(self.val)
class PrettyDuration(float):
"""Time duration with pretty print.
Behaves like a float, and can always be cast to a float.
"""
def __init__(self, seconds):
self.duration = seconds
def __str__(self):
return self.time_string(self.duration)
def __repr__(self):
return self.time_string(self.duration)
@staticmethod
def to_dhms(seconds):
"""convert seconds into hh:mm:ss:ms"""
pos = seconds >= 0
if not pos:
seconds = -seconds
ms = seconds % 1; ms = round(ms*10000)/10
seconds = floor(seconds)
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
Time = namedtuple('Time', 'pos dd hh mm ss ms')
time = Time(pos=pos, dd=d, hh=h, mm=m, ss=s, ms=ms)
return time
@staticmethod
def time_string(seconds):
"""returns a formatted time string."""
if np.isinf(seconds):
return 'inf'
pos, dd, hh, mm, ss, s = PrettyDuration.to_dhms(seconds)
if s > 0:
if mm == 0:
# in this case, represent milliseconds in terms of
# seconds (i.e. a decimal)
sstr = str(s/1000).lstrip('0')
if s >= 999.5:
ss += 1
s = 0
sstr = ""
# now propagate the carry:
if ss == 60:
mm += 1
ss = 0
if mm == 60:
hh +=1
mm = 0
if hh == 24:
dd += 1
hh = 0
else:
# for all other cases, milliseconds will be represented
# as an integer
if s >= 999.5:
ss += 1
s = 0
sstr = ""
# now propagate the carry:
if ss == 60:
mm += 1
ss = 0
if mm == 60:
hh +=1
mm = 0
if hh == 24:
dd += 1
hh = 0
else:
sstr = ":{:03d}".format(int(s))
else:
sstr = ""
if dd > 0:
daystr = "{:01d} days ".format(dd)
else:
daystr = ""
if hh > 0:
timestr = daystr + "{:01d}:{:02d}:{:02d}{} hours".format(hh, mm, ss, sstr)
elif mm > 0:
timestr = daystr + "{:01d}:{:02d}{} minutes".format(mm, ss, sstr)
elif ss > 0:
timestr = daystr + "{:01d}{} seconds".format(ss, sstr)
else:
timestr = daystr +"{} milliseconds".format(s)
if not pos:
timestr = "-" + timestr
return timestr
def __add__(self, other):
"""a + b"""
return PrettyDuration(self.duration + other)
def __radd__(self, other):
"""b + a"""
return self.__add__(other)
def __sub__(self, other):
"""a - b"""
return PrettyDuration(self.duration - other)
def __rsub__(self, other):
"""b - a"""
return other - self.duration
def __mul__(self, other):
"""a * b"""
return PrettyDuration(self.duration * other)
def __rmul__(self, other):
"""b * a"""
return self.__mul__(other)
def __truediv__(self, other):
"""a / b"""
return PrettyDuration(self.duration / other)
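# Minimal usage sketch for the pretty-print helpers (not part of the original
# code; the example values below are illustrative only).
def _example_pretty_printers():
    # str(PrettyBytes(2048))   -> '2.000 kilobytes'
    # str(PrettyInt(1234567))  -> '1,234,567'
    # str(PrettyDuration(62))  -> '1:02 minutes'
    return str(PrettyBytes(2048)), str(PrettyInt(1234567)), str(PrettyDuration(62))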
def shrinkMatColsTo(mat, numCols):
""" Docstring goes here
Shrinks a NxM1 matrix down to an NxM2 matrix, where M2 <= M1"""
import scipy.ndimage
numCells = mat.shape[0]
numColsMat = mat.shape[1]
a = np.zeros((numCells, numCols))
for row in np.arange(numCells):
niurou = scipy.ndimage.interpolation.zoom(input=mat[row,:], zoom=(numCols/numColsMat), order = 1)
a[row,:] = niurou
return a
def find_threshold_crossing_events(x, threshold, *, mode='above'):
"""Find threshold crossing events. INCLUSIVE
Parameters
----------
x : numpy array
Input data
threshold : float
The value whose crossing triggers an event
mode : string, optional in ['above', 'below']; default 'above'
event triggering above, or below threshold
Returns
-------
eventlist : list
List containing the indices corresponding to threshold crossings
eventmax : list
List containing the maximum value of each event
"""
from itertools import groupby
from operator import itemgetter
if mode == 'below':
cross_threshold = np.where(x <= threshold, 1, 0)
elif mode == 'above':
cross_threshold = np.where(x >= threshold, 1, 0)
else:
raise NotImplementedError(
"mode {} not understood for find_threshold_crossing_events".format(str(mode)))
eventlist = []
eventmax = []
for k,v in groupby(enumerate(cross_threshold),key=itemgetter(1)):
if k:
v = list(v)
eventlist.append([v[0][0],v[-1][0]])
try :
eventmax.append(x[v[0][0]:(v[-1][0]+1)].max())
except :
print(v, x[v[0][0]:v[-1][0]])
eventmax = np.asarray(eventmax)
eventlist = np.asarray(eventlist)
return eventlist, eventmax
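# Minimal usage sketch for find_threshold_crossing_events (not part of the
# original code; the synthetic trace below is illustrative only).
def _example_find_threshold_crossing_events():
    x = np.array([0., 1., 3., 4., 2., 0., 5., 0.])
    eventlist, eventmax = find_threshold_crossing_events(x, 2.0, mode='above')
    # eventlist -> [[2, 4], [6, 6]] (inclusive index bounds), eventmax -> [4., 5.]
    return eventlist, eventmax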
def get_events_boundaries(x, *, PrimaryThreshold=None,
SecondaryThreshold=None,
minThresholdLength=None, minLength=None,
maxLength=None, ds=None, mode='above'):
"""get event boundaries such that event.max >= PrimaryThreshold
and the event extent is defined by SecondaryThreshold.
Note that when PrimaryThreshold==SecondaryThreshold, then this is a
simple threshold crossing algorithm.
NB. minLength and maxLength are applied to the SecondaryThreshold
events, whereas minThresholdLength is applied to the
PrimaryThreshold events.
Parameters
----------
x : numpy array
Input data
mode : string, optional in ['above', 'below']; default 'above'
event triggering above, or below threshold
PrimaryThreshold : float, optional
If mode=='above', requires that event.max >= PrimaryThreshold
If mode=='below', requires that event.min <= PrimaryThreshold
SecondaryThreshold : float, optional
The value that defines the event extent
minThresholdLength : float, optional
Minimum duration for which the PrimaryThreshold is crossed
minLength : float, optional
Minimum duration for which the SecondaryThreshold is crossed
maxLength : float, optional
Maximum duration for which the SecondaryThreshold is crossed
ds : float, optional
Time step of the input data x
Returns
-------
returns bounds, maxes, events
where bounds <==> SecondaryThreshold to SecondaryThreshold, inclusive
maxes <==> maximum value during each event
events <==> PrimaryThreshold to PrimaryThreshold, inclusive
"""
# TODO: x must be a numpy array
# TODO: ds is often used, but we have no default, and no check for when
# it is left as None.
# TODO: the Docstring should equally be improved.
x = x.squeeze()
if x.ndim > 1:
raise TypeError("multidimensional arrays not supported!")
if PrimaryThreshold is None: # by default, threshold is 3 SDs above mean of x
PrimaryThreshold = np.mean(x) + 3*np.std(x)
if SecondaryThreshold is None: # by default, revert back to mean of x
SecondaryThreshold = np.mean(x) # + 0*np.std(x)
events, _ = \
find_threshold_crossing_events(x=x,
threshold=PrimaryThreshold,
mode=mode)
# apply minThresholdLength criterion:
if minThresholdLength is not None and len(events) > 0:
durations = (events[:,1] - events[:,0] + 1) * ds
events = events[durations >= minThresholdLength]
if len(events) == 0:
bounds, maxes, events = [], [], []
logging.warning("no events satisfied criteria")
return bounds, maxes, events
# Find periods where value is > SecondaryThreshold; note that the previous periods should be within these!
if mode == 'above':
assert SecondaryThreshold <= PrimaryThreshold, \
"Secondary Threshold by definition should include more data than Primary Threshold"
elif mode == 'below':
assert SecondaryThreshold >= PrimaryThreshold, \
"Secondary Threshold by definition should include more data than Primary Threshold"
else:
raise NotImplementedError(
"mode {} not understood for find_threshold_crossing_events".format(str(mode)))
bounds, broader_maxes = \
find_threshold_crossing_events(x=x,
threshold=SecondaryThreshold,
mode=mode)
# Find corresponding big windows for potential events
# Specifically, look for closest left edge that is just smaller
outer_boundary_indices = np.searchsorted(bounds[:,0], events[:,0], side='right')
# searchsorted finds the index after, so subtract one to get index before
outer_boundary_indices = outer_boundary_indices - 1
# Find extended boundaries for events by pairing to larger windows
# (Note that there may be repeats if the larger window contains multiple > 3SD sections)
bounds = bounds[outer_boundary_indices,:]
maxes = broader_maxes[outer_boundary_indices]
if minLength is not None and len(events) > 0:
durations = (bounds[:,1] - bounds[:,0] + 1) * ds
# TODO: refactor [durations >= minLength] but be careful about edge cases
bounds = bounds[durations >= minLength]
maxes = maxes[durations >= minLength]
events = events[durations >= minLength]
if maxLength is not None and len(events) > 0:
durations = (bounds[:,1] - bounds[:,0] + 1) * ds
# TODO: refactor [durations <= maxLength] but be careful about edge cases
bounds = bounds[durations <= maxLength]
maxes = maxes[durations <= maxLength]
events = events[durations <= maxLength]
if len(events) == 0:
bounds, maxes, events = [], [], []
logging.warning("no events satisfied criteria")
return bounds, maxes, events
# Now, since all that we care about are the larger windows, so we should get rid of repeats
_, unique_idx = np.unique(bounds[:,0], return_index=True)
bounds = bounds[unique_idx,:] # SecondaryThreshold to SecondaryThreshold
maxes = maxes[unique_idx] # maximum value during event
events = events[unique_idx,:] # PrimaryThreshold to PrimaryThreshold
return bounds, maxes, events
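# Illustrative usage sketch (added; not part of the original module). An event
# must peak at or above PrimaryThreshold; its extent is then grown out to the
# surrounding SecondaryThreshold crossings.
def _demo_get_events_boundaries():
    x = np.array([0., 1., 3., 1., 0., 4., 5., 0.])
    bounds, maxes, events = get_events_boundaries(
        x, PrimaryThreshold=4.0, SecondaryThreshold=1.0, ds=1.0)
    # bounds -> [[5, 6]] (SecondaryThreshold extent), maxes -> [5.], events -> [[5, 6]]
    return bounds, maxes, events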
def signal_envelope1D(data, *, sigma=None, fs=None):
logging.warning("'signal_envelope1D' is deprecated; use 'signal_envelope_1d' instead!")
return signal_envelope_1d(data, sigma=sigma, fs=fs)
def signal_envelope_1d(data, *, sigma=None, fs=None):
"""Finds the signal envelope by taking the absolute value
of the Hilbert transform
Parameters
----------
data : numpy array, list, or RegularlySampledAnalogSignalArray
Input data
If data is a numpy array, it is expected to have shape
(n_signals, n_samples)
If data is a list, it is expected to have length n_signals,
where each sublist has length n_samples, i.e. data is not
jagged
sigma : float, optional
Standard deviation of the Gaussian kernel used to
smooth the envelope after applying the Hilbert transform.
Units of seconds. Default is 4 ms
fs : float, optional
Sampling rate of the signal
Returns
-------
out : same type as the input object
An object containing the signal envelope
Note: this function is epoch-aware; each epoch of a
RegularlySampledAnalogSignalArray is processed separately.
"""
if sigma is None:
sigma = 0.004 # 4 ms standard deviation
if fs is None:
if isinstance(data, (np.ndarray, list)):
raise ValueError("sampling frequency must be specified!")
elif isinstance(data, core.RegularlySampledAnalogSignalArray):
fs = data.fs
if isinstance(data, (np.ndarray, list)):
data_array = np.array(data)
n_dims = np.array(data).ndim
assert n_dims <= 2, "Only 1D signals supported!"
if n_dims == 1:
input_data = data_array.reshape((1, data_array.size))
else:
input_data = data_array
n_signals, n_samples = input_data.shape
# Compute number of samples to compute fast FFTs
padlen = nextfastpower(n_samples) - n_samples
# Pad data
paddeddata = np.hstack( (input_data, np.zeros((n_signals, padlen))) )
# Use hilbert transform to get an envelope
envelope = np.absolute(hilbert(paddeddata, axis=-1))
# free up memory
del paddeddata
# Truncate results back to original length
envelope = envelope[..., :n_samples]
if sigma:
# Smooth envelope with a gaussian (sigma = 4 ms default)
EnvelopeSmoothingSD = sigma*fs
smoothed_envelope = scipy.ndimage.gaussian_filter1d(envelope, EnvelopeSmoothingSD,
mode='constant', axis=-1)
envelope = smoothed_envelope
if isinstance(data, list):
envelope = envelope.tolist()
return envelope
elif isinstance(data, core.RegularlySampledAnalogSignalArray):
# Only ASA data of shape (n_signals, n_timepoints) -> 2D currently supported
assert data.data.ndim == 2
cum_lengths = np.insert(np.cumsum(data.lengths), 0, 0)
newasa = data.copy()
# for segment in data:
for idx in range(data.n_epochs):
# print('hilberting epoch {}/{}'.format(idx+1, data.n_epochs))
segment_data = data._data[:,cum_lengths[idx]:cum_lengths[idx+1]]
n_signals, n_samples = segment_data.shape
# Compute number of samples to compute fast FFTs:
padlen = nextfastpower(n_samples) - n_samples
# Pad data
paddeddata = np.hstack( (segment_data, np.zeros((n_signals, padlen))) )
# Use hilbert transform to get an envelope
envelope = np.absolute(hilbert(paddeddata, axis=-1))
# free up memory
del paddeddata
# Truncate results back to original length
envelope = envelope[..., :n_samples]
if sigma:
# Smooth envelope with a gaussian (sigma = 4 ms default)
EnvelopeSmoothingSD = sigma*fs
smoothed_envelope = scipy.ndimage.gaussian_filter1d(envelope, EnvelopeSmoothingSD,
mode='constant', axis=-1)
envelope = smoothed_envelope
newasa._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = np.atleast_2d(envelope)
return newasa
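# Illustrative usage sketch (added; not part of the original module, and it
# relies on this module's own numpy / scipy.signal.hilbert imports). The
# envelope of a pure tone is roughly its amplitude away from the edges.
def _demo_signal_envelope_1d():
    fs = 1000.0
    t = np.arange(0, 1, 1 / fs)
    lfp = 0.5 * np.sin(2 * np.pi * 8 * t)
    env = signal_envelope_1d(lfp, sigma=0.004, fs=fs)
    # env has shape (1, 1000) and hovers near 0.5 away from the signal edges
    return env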
def nextpower(n, base=2.0):
"""Return the next integral power of two greater than the given number.
Specifically, return m such that
m >= n
m == 2**x
where x is an integer. Use base argument to specify a base other than 2.
This is useful for ensuring fast FFT sizes.
From https://gist.github.com/bhawkins/4479607 (<NAME>)
"""
x = base**ceil (log (n) / log (base))
if type(n) == np.ndarray:
return np.asarray (x, dtype=int)
else:
return int (x)
def nextfastpower(n):
"""Return the next integral power of small factors greater than the given
number. Specifically, return m such that
m >= n
m == 2**x * 3**y * 5**z
where x, y, and z are integers.
This is useful for ensuring fast FFT sizes.
From https://gist.github.com/bhawkins/4479607 (<NAME>)
See also http://scipy.github.io/devdocs/generated/scipy.fftpack.next_fast_len.html
"""
if n < 7:
return max (n, 1)
# x, y, and z are all bounded from above by the formula of nextpower.
# Compute all possible combinations for powers of 3 and 5.
# (Not too many for reasonable FFT sizes.)
def power_series (x, base):
nmax = ceil (log (x) / log (base))
return np.logspace (0.0, nmax, num=nmax+1, base=base)
n35 = np.outer (power_series (n, 3.0), power_series (n, 5.0))
n35 = n35[n35<=n]
# Lump the powers of 3 and 5 together and solve for the powers of 2.
n2 = nextpower (n / n35)
return int (min (n2 * n35))
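# Illustrative sketch (added; not part of the original module): compare the
# plain power-of-two padding target with the 2**x * 3**y * 5**z "fast" target.
def _demo_fft_padding_targets():
    n = 1000
    # nextpower(1000) -> 1024, while nextfastpower(1000) -> 1000 (= 2**3 * 5**3)
    return nextpower(n), nextfastpower(n)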
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def gaussian_filter(obj, *, fs=None, sigma=None, truncate=None, inplace=False, mode=None, cval=None, within_intervals=False):
"""Smooths with a Gaussian kernel.
Smoothing is applied along the abscissa, and the same smoothing is applied to each
signal in the RegularlySampledAnalogSignalArray, or to each unit in a BinnedSpikeTrainArray.
Smoothing is applied ACROSS intervals, but smoothing WITHIN intervals is also supported.
Parameters
----------
obj : RegularlySampledAnalogSignalArray or BinnedSpikeTrainArray.
fs : float, optional
Sampling rate (in obj.base_unit^-1) of obj. If not provided, it will
be inferred.
sigma : float, optional
Standard deviation of Gaussian kernel, in obj.base_units. Default is 0.05
(50 ms if base_unit=seconds).
truncate : float, optional
Bandwidth outside of which the filter value will be zero. Default is 4.0.
inplace : bool
If True the data will be replaced with the smoothed data.
Default is False.
mode : {‘reflect’, ‘constant’, ‘nearest’, ‘mirror’, ‘wrap’}, optional
The mode parameter determines how the array borders are handled,
where cval is the value when mode is equal to ‘constant’. Default is
‘reflect’.
cval : scalar, optional
Value to fill past edges of input if mode is ‘constant’. Default is 0.0.
within_intervals : boolean, optional
If True, then smooth within each epoch. Otherwise smooth across epochs.
Default is False.
Note that when mode = 'wrap', smoothing within epochs is not affected
by wrapping.
Returns
-------
out : same type as obj
An object with smoothed data is returned.
"""
if sigma is None:
sigma = 0.05
if truncate is None:
truncate = 4
if mode is None:
mode = 'reflect'
if cval is None:
cval = 0.0
if not inplace:
out = copy.deepcopy(obj)
else:
out = obj
if isinstance(out, core.RegularlySampledAnalogSignalArray):
if fs is None:
fs = out.fs
if fs is None:
raise ValueError("fs must either be specified, or must be contained in the {}!".format(out.type_name))
elif isinstance(out, core.BinnedEventArray):
bst = out
if fs is None:
fs = 1/bst.ds
if fs is None:
raise ValueError("fs must either be specified, or must be contained in the {}!".format(out.type_name))
else:
raise NotImplementedError("gaussian_filter for {} is not yet supported!".format(str(type(out))))
sigma = sigma * fs
if not within_intervals:
# see https://stackoverflow.com/questions/18697532/gaussian-filtering-a-image-with-nan-in-python
# (1) if smoothing across intervals, we work on a merged support
# (2) build abscissa_vals, including existing ones, and out-of-support ones
# (3) to smooth U, build auxiliary arrays V and W, with (V=U).nan=0, and (W=1).nan=0
# (4) Z = smooth(V)/smooth(W)
# (5) only keep original support, and original abscissa_vals
if isinstance(out, (core.RegularlySampledAnalogSignalArray, core.BinnedEventArray)):
support = out._abscissa.support.merge()
if not support.domain.is_finite:
support.domain = (support.start, support.stop) #TODO: #FIXME might come from abscissa definition, and not from support
missing_abscissa_vals = []
for interval in (~support):
missing_vals = frange(interval.start, interval.stop, 1/fs)
missing_abscissa_vals.extend(missing_vals)
if isinstance(out, core.RegularlySampledAnalogSignalArray):
n_signals = out.n_signals
n_samples = out.n_samples
elif isinstance(out, core.BinnedEventArray):
n_signals = out.n_series
n_samples = out.n_bins
V = np.zeros((n_signals, n_samples + len(missing_abscissa_vals)))
W = np.ones(V.shape)
all_abscissa_vals = np.sort(np.append(out._abscissa_vals, missing_abscissa_vals))
data_idx = np.searchsorted(all_abscissa_vals, out._abscissa_vals)
missing_idx = np.searchsorted(all_abscissa_vals, missing_abscissa_vals)
import numpy as np
import copy
import os
import gc
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
from src.data import *
from src.models import *
from src.fedavg import *
from src.client import *
from src.clustering import *
from src.utils import *
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() else 'cpu')
torch.cuda.set_device(args.gpu) ## Setting cuda on GPU
def mkdirs(dirpath):
try:
os.makedirs(dirpath)
except Exception as _:
pass
path = args.savedir + args.alg + '/' + args.partition + '/' + args.dataset + '/' #+ str(args.trial)
mkdirs(path)
##################################### Data partitioning section
args.local_view = True
X_train, y_train, X_test, y_test, net_dataidx_map, net_dataidx_map_test, \
traindata_cls_counts, testdata_cls_counts = partition_data(args.dataset,
args.datadir, args.logdir, args.partition, args.num_users, beta=args.beta, local_view=args.local_view)
train_dl_global, test_dl_global, train_ds_global, test_ds_global = get_dataloader(args.dataset,
args.datadir,
args.batch_size,
32)
print("len train_ds_global:", len(train_ds_global))
print("len test_ds_global:", len(test_ds_global))
################################### Shared Data
idxs_test = np.arange(len(test_ds_global))
labels_test = np.array(test_ds_global.target)
# Sort test labels
idxs_labels_test = np.vstack((idxs_test, labels_test))
idxs_labels_test = idxs_labels_test[:, idxs_labels_test[1, :].argsort()]
idxs_test = idxs_labels_test[0, :]
labels_test = idxs_labels_test[1, :]
idxs_test_shared = []
N = 250
ind = 0
for k in range(10):
ind = max(np.where(labels_test==k)[0])
idxs_test_shared.extend(idxs_test[(ind - N):(ind)])
test_targets = np.array(test_ds_global.target)
for i in range(10):
print(f'Shared data has label: {i}, {len(np.where(test_targets[idxs_test_shared[i*N:(i+1)*N]]==i)[0])} samples')
shared_data_loader = DataLoader(DatasetSplit(test_ds_global, idxs_test_shared), batch_size=N, shuffle=False)
for x,y in shared_data_loader:
print(x.shape)
################################### build model
print(f'MODEL: {args.model}, Dataset: {args.dataset}')
users_model = []
if args.model == 'lenet5' and args.dataset == 'cifar10':
net_glob = LeNet5Cifar10().to(args.device)
net_glob.apply(weight_init)
users_model = [LeNet5Cifar10().to(args.device).apply(weight_init) for _ in range(args.num_users)]
elif args.model == 'lenet5' and args.dataset == 'cifar100':
net_glob = LeNet5Cifar100().to(args.device)
net_glob.apply(weight_init)
users_model = [LeNet5Cifar100().to(args.device).apply(weight_init) for _ in range(args.num_users)]
elif args.model == 'lenet5' and args.dataset == 'mnist':
net_glob = LeNet5Mnist().to(args.device)
net_glob.apply(weight_init)
users_model = [LeNet5Mnist().to(args.device).apply(weight_init) for _ in range(args.num_users)]
if args.load_initial:
initial_state_dict = torch.load(args.load_initial)
net_glob.load_state_dict(initial_state_dict)
initial_state_dict = copy.deepcopy(net_glob.state_dict())
server_state_dict = copy.deepcopy(net_glob.state_dict())
for i in range(args.num_users):
users_model[i].load_state_dict(initial_state_dict)
print(net_glob)
total = 0
for name, param in net_glob.named_parameters():
print(name, param.size())
total += np.prod(param.size())
#print(np.array(param.data.cpu().numpy().reshape([-1])))
#print(isinstance(param.data.cpu().numpy(), np.array))
print(total)
################################# Initializing Clients
clients = []
for idx in range(args.num_users):
dataidxs = net_dataidx_map[idx]
if net_dataidx_map_test is None:
dataidxs_test = None
else:
dataidxs_test = net_dataidx_map_test[idx]
#print(f'Initializing Client {idx}')
noise_level = args.noise
if idx == args.num_users - 1:
noise_level = 0
if args.noise_type == 'space':
train_dl_local, test_dl_local, train_ds_local, test_ds_local = get_dataloader(args.dataset,
args.datadir, args.local_bs, 32,
dataidxs, noise_level, idx,
args.num_users-1,
dataidxs_test=dataidxs_test)
else:
noise_level = args.noise / (args.num_users - 1) * idx
train_dl_local, test_dl_local, train_ds_local, test_ds_local = get_dataloader(args.dataset,
args.datadir, args.local_bs, 32,
dataidxs, noise_level,
dataidxs_test=dataidxs_test)
clients.append(Client_ClusterFL(idx, copy.deepcopy(users_model[idx]), args.local_bs, args.local_ep,
args.lr, args.momentum, args.device, train_dl_local, test_dl_local))
###################################### Federation
float_formatter = "{:.4f}".format
#np.set_printoptions(formatter={float: float_formatting_function})
np.set_printoptions(formatter={'float_kind':float_formatter})
loss_train = []
init_tracc_pr = [] # initial train accuracy for each round
final_tracc_pr = [] # final train accuracy for each round
init_tacc_pr = [] # initial test accuracy for each round
final_tacc_pr = [] # final test accuracy for each round
init_tloss_pr = [] # initial test loss for each round
final_tloss_pr = [] # final test loss for each round
clients_best_acc = [0 for _ in range(args.num_users)]
w_locals, loss_locals = [], []
init_local_tacc = [] # initial local test accuracy at each round
final_local_tacc = [] # final local test accuracy at each round
init_local_tloss = [] # initial local test loss at each round
final_local_tloss = [] # final local test loss at each round
ckp_avg_tacc = []
ckp_avg_best_tacc = []
w_glob_per_cluster = []
users_best_acc = [0 for _ in range(args.num_users)]
best_glob_acc = 0
best_glob_w = None
idx_cluster = 0
selected_clusters = {i: [] for i in range(10)}
clust_err = []
clust_acc = []
count_clusters = {i:0 for i in range(1, args.rounds)}
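# Summary of each round (added comment): (1) sample a fraction of the clients;
# (2) from round 2 on, each selected client adopts the cluster model that scores
# best on its local test set; (3) clients train locally; (4) clients are
# re-clustered from their logits on the shared data; (5) FedAvg is run within
# each cluster, weighted by local dataset size.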
for iteration in range(args.rounds):
m = max(int(args.frac * args.num_users), 1)
idxs_users = np.random.choice(range(args.num_users), m, replace=False)
print(f'###### ROUND {iteration+1} ######')
print(f'Clients {idxs_users}')
selected_clusters.clear()
if iteration+1 > 1:
selected_clusters = {i: [] for i in range(len(clusters))}
for idx in idxs_users:
if iteration+1 > 1:
assert (len(clusters) == len(w_glob_per_cluster))
count_clusters[iteration] = len(clusters)
acc_select = []
for i in range(len(clusters)):
clients[idx].set_state_dict(copy.deepcopy(w_glob_per_cluster[i]))
loss, acc = clients[idx].eval_test()
acc_select.append(acc)
idx_cluster = np.argmax(acc_select)
clients[idx].set_state_dict(copy.deepcopy(w_glob_per_cluster[idx_cluster]))
selected_clusters[idx_cluster].append(idx)
print(f'Client {idx}, Select Cluster: {idx_cluster}')
print(f'acc clusters: {acc_select}')
loss, acc = clients[idx].eval_test()
init_local_tacc.append(acc)
init_local_tloss.append(loss)
loss = clients[idx].train(is_print=False)
w_locals.append(copy.deepcopy(clients[idx].get_state_dict()))
loss_locals.append(copy.deepcopy(loss))
loss, acc = clients[idx].eval_test()
if acc > clients_best_acc[idx]:
clients_best_acc[idx] = acc
final_local_tacc.append(acc)
final_local_tloss.append(loss)
# Finding clusters
clusters, clusters_bm, w_locals_clusters, clients_correct_pred_per_label, clients_similarity, mat_sim, A = \
cluster_logits(idxs_users, clients, shared_data_loader, args, alpha=args.cluster_alpha,
nclasses=args.nclasses, nsamples=args.nsamples_shared)
## Clustering Error
c_err, c_acc = error_clustering(clusters_bm, idxs_users, traindata_cls_counts)
clust_err.append(c_err)
clust_acc.append(c_acc)
clusters_label = []
clusters_client_label = []
for c in clusters:
temp = []
temp2 = []
for k in c:
temp2.append(list(traindata_cls_counts[k].keys()))
temp.extend(list(traindata_cls_counts[k].keys()))
clusters_client_label.append(temp2)
temp = list(set(temp))
clusters_label.append(temp)
# FedAvg per cluster
total_data_points = [sum([len(net_dataidx_map[r]) for r in clust]) for clust in clusters]
fed_avg_freqs = [[len(net_dataidx_map[r]) / total_data_points[clust_id] for r in clusters[clust_id]]
for clust_id in range(len(clusters))]
w_glob_per_cluster.clear()
acc_glob_pc = []
for i in range(len(clusters)):
ww = FedAvg(w_locals_clusters[i], weight_avg = fed_avg_freqs[i])
w_glob_per_cluster.append(ww)
net_glob.load_state_dict(copy.deepcopy(ww))
_, acc = eval_test(net_glob, args, test_dl_global)
if acc > best_glob_acc:
best_glob_acc = acc
best_glob_w = copy.deepcopy(ww)
acc_glob_pc.append(acc)
idx_cluster = np.argmax(acc_glob_pc)
# update global weights
w_glob = FedAvg(w_locals)
# copy weight to net_glob
net_glob.load_state_dict(w_glob)
# print loss
loss_avg = sum(loss_locals) / len(loss_locals)
avg_init_tloss = sum(init_local_tloss) / len(init_local_tloss)
avg_init_tacc = sum(init_local_tacc) / len(init_local_tacc)
avg_final_tloss = sum(final_local_tloss) / len(final_local_tloss)
avg_final_tacc = sum(final_local_tacc) / len(final_local_tacc)
print('## END OF ROUND ##')
template = 'Average Train loss {:.3f}'
print(template.format(loss_avg))
template = "AVG Init Test Loss: {:.3f}, AVG Init Test Acc: {:.3f}"
print(template.format(avg_init_tloss, avg_init_tacc))
template = "AVG Final Test Loss: {:.3f}, AVG Final Test Acc: {:.3f}"
print(template.format(avg_final_tloss, avg_final_tacc))
if iteration%args.print_freq == 0 and iteration != 0:
print('--- PRINTING ALL CLIENTS STATUS ---')
current_acc = []
for k in range(args.num_users):
loss, acc = clients[k].eval_test()
current_acc.append(acc)
template = ("Client {:3d}, labels {}, count {}, best_acc {:3.3f}, current_acc {:3.3f} \n")
print(template.format(k, traindata_cls_counts[k], clients[k].get_count(),
clients_best_acc[k], current_acc[-1]))
template = ("Round {:1d}, Avg current_acc {:3.3f}, Avg best_acc {:3.3f}")
print(template.format(iteration+1, np.mean(current_acc), np.mean(clients_best_acc)))
ckp_avg_tacc.append(np.mean(current_acc))
ckp_avg_best_tacc.append(np.mean(clients_best_acc))
print('----- Analysis End of Round -------')
for idx in idxs_users:
print(f'Client {idx}, Count: {clients[idx].get_count()}, Labels: {traindata_cls_counts[idx]}')
print('')
for idx in idxs_users:
print(f'Client {idx}, Correct_pred_per_label: {clients_correct_pred_per_label[idx]}')
#print(f'similarity: {clients_similarity[idx]:}')
print('')
print(f'Similarity Matrix: \n {mat_sim}')
print('')
print(f'Selected Clusters {selected_clusters}')
print('')
print(f'New Cluster {clusters}')
print(f'Error of Clustering {clust_err[-1]}')
print(f'Acc of Clustering {clust_acc[-1]}')
print(f'Clusters Labels {clusters_label}')
print(f'Clusters Clients Labels {clusters_client_label}')
print(f'Clusters Glob Acc: {acc_glob_pc}')
loss_train.append(loss_avg)
init_tacc_pr.append(avg_init_tacc)
init_tloss_pr.append(avg_init_tloss)
final_tacc_pr.append(avg_final_tacc)
final_tloss_pr.append(avg_final_tloss)
#break;
## clear the placeholders for the next round
w_locals.clear()
loss_locals.clear()
init_local_tacc.clear()
init_local_tloss.clear()
final_local_tacc.clear()
final_local_tloss.clear()
## calling garbage collector
gc.collect()
############################### Saving Training Results
with open(path+str(args.trial)+'_loss_train.npy', 'wb') as fp:
loss_train = np.array(loss_train)
np.save(fp, loss_train)
with open(path+str(args.trial)+'_init_tacc_pr.npy', 'wb') as fp:
init_tacc_pr = np.array(init_tacc_pr)
np.save(fp, init_tacc_pr)
with open(path+str(args.trial)+'_init_tloss_pr.npy', 'wb') as fp:
init_tloss_pr = np.array(init_tloss_pr)
np.save(fp, init_tloss_pr)
with open(path+str(args.trial)+'_final_tacc_pr.npy', 'wb') as fp:
final_tacc_pr = np.array(final_tacc_pr)
import unittest
import numpy as np
from nn.mlp import mlp_shape_dimension
from nn.optimization_strategy import MlpStrategy
from pso.multi_swarm import MultiSwarm, MultiParticle
from pso.swarm import SwarmConfig
from util.plotting import plot_fitness
class MlpStrategyTestCase(unittest.TestCase):
def test_mlp_strategy_creation(self):
swarm_config = SwarmConfig(number_of_particles=20, particle_size=30, lower_bound=-0.5, upper_bound=0.5)
x_training = np.random.uniform(size=(20, 7))
y_training = np.random.uniform(size=(20, 2))
x_validation = np.random.uniform(size=(10, 7))
y_validation = np.random.uniform(size=(10, 2))
strategy = MlpStrategy(
inner_swarm_config=swarm_config,
x_training=x_training,
y_training=y_training,
x_validation=x_validation,
y_validation=y_validation,
)
self.assertIs(strategy.inner_config, swarm_config)
self.assertIs(strategy.x_training, x_training)
self.assertIs(strategy.y_training, y_training)
self.assertIs(strategy.x_validation, x_validation)
self.assertIs(strategy.y_validation, y_validation)
self.assertIs(strategy.number_of_inputs, 7)
self.assertIs(strategy.number_of_outputs, 2)
def test_create_inner_swarm(self):
swarm_config = SwarmConfig(number_of_particles=20, particle_size=30, lower_bound=-0.5, upper_bound=0.5)
x_training = np.random.uniform(size=(20, 7))
y_training = np.random.uniform(size=(20, 2))
x_validation = np.random.uniform(size=(10, 7))
y_validation = np.random.uniform(size=(10, 2))
strategy = MlpStrategy(
inner_swarm_config=swarm_config,
x_training=x_training,
y_training=y_training,
x_validation=x_validation,
y_validation=y_validation,
)
dimension = mlp_shape_dimension((7, 3, 5, 2))
swarm = strategy.create_inner_swarm(np.asarray([3.4, 5.6]))
self.assertEqual(swarm.config().upper_bound, swarm_config.upper_bound)
self.assertEqual(swarm.config().lower_bound, swarm_config.lower_bound)
self.assertEqual(swarm.config().number_of_particles, swarm_config.number_of_particles)
self.assertEqual(swarm.config().particle_size, dimension)
def test_integrated_optimization(self):
# initial particle size is whatever because it will vary with the architecture
inner_config = SwarmConfig(number_of_particles=10, particle_size=30, lower_bound=-0.5, upper_bound=0.5)
x_training = np.random.uniform(size=(20, 7))
y_training = np.random.uniform(size=(20, 2))
x_validation = np.random.uniform(size=(10, 7))
y_validation = np.random.uniform(size=(10, 2))
strategy = MlpStrategy(
inner_swarm_config=inner_config,
x_training=x_training,
y_training=y_training,
x_validation=x_validation,
y_validation=y_validation,
)
outer_config = SwarmConfig(number_of_particles=10, particle_size=3, lower_bound=1, upper_bound=12)
swarm = MultiSwarm(outer_config, strategy)
swarm.fly(2, 10)
first_outer_fitness = swarm.best_outer_fitness()
first_inner_fitness = swarm.best_inner_fitness()
swarm.fly(3, 10)
second_outer_fitness = swarm.best_outer_fitness()
second_inner_fitness = swarm.best_inner_fitness()
self.assertTrue(second_outer_fitness <= first_outer_fitness)
self.assertTrue(second_inner_fitness <= first_inner_fitness)
# plot_fitness(
# inner_swarm_fitness_progress=swarm.inner_swarm_fitness_progress(),
# outer_swarm_fitness_progress=swarm.outer_swarm_fitness_progress()
# )
print('Best architecture: [{}, {}, {}]'.format(7, swarm.best_outer_position(), 2))
print('Best particle: ', swarm.best_inner_position())
print('Best outer fitness ', second_outer_fitness)
print('Best inner fitness ', second_inner_fitness)
def test_initial_inner_position_for_outer_position(self):
inner_config = SwarmConfig(number_of_particles=10, particle_size=30, lower_bound=-0.5, upper_bound=0.5)
x_training = np.random.uniform(size=(20, 7))
y_training = np.random.uniform(size=(20, 2))
x_validation = np.random.uniform(size=(10, 7))
y_validation = np.random.uniform(size=(10, 2))
strategy = MlpStrategy(
inner_swarm_config=inner_config,
x_training=x_training,
y_training=y_training,
x_validation=x_validation,
y_validation=y_validation,
)
initial_position = strategy.initial_inner_position_for_outer_position(np.asarray([2, 3]))
self.assertEqual(len(initial_position), 33)
def test_best_inner_position_for_outer_particle(self):
inner_config = SwarmConfig(number_of_particles=10, particle_size=30, lower_bound=-0.5, upper_bound=0.5)
x_training = np.random.uniform(size=(20, 7))
y_training = np.random.uniform(size=(20, 2))
x_validation = np.random.uniform(size=(10, 7))
y_validation = np.random.uniform(size=(10, 2))
strategy = MlpStrategy(
inner_swarm_config=inner_config,
x_training=x_training,
y_training=y_training,
x_validation=x_validation,
y_validation=y_validation,
)
best_so_far = MultiParticle(fitness=0.5,
inner_position=np.random.uniform(size=33),
outer_position=np.asarray([2, 3]))
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# https://traveling-santa.reaktor.com/
import numpy as np
from copy import deepcopy
R_EARTH = 6378
HOME_ID = 1
ID = 0
LAT = 1
LONG = 2
WEIGHT = 3
MAX_WEIGHT = 10000000 # max 10000 kg = 10 million grams
START_LAT = 68.073611
START_LONG = 29.315278
FILENAME = 'nicelist.txt'
NEIGHBOR_FILE = 'neighbor.txt'
NEIGHBOR_TOT_FILE = 'neighbor-total.txt'
# ----------------------------------------------------------------------
class Child:
def __init__(self, id, latitude, longitude, weight):
self.id = id
self.latitude = latitude
self.longitude = longitude
self.weight = weight
self.xyz = to_xyz(latitude, longitude, 1.0)
def __str__(self):
return str(self.id)
#return str((self.id, self.latitude, self.longitude, self.weight))
# ----------------------------------------------------------------------
# latitude, longitude, altitude to 3D-vector coordinates
# formula source: ??? differs from e.g. Wikipedia ...
def to_xyz(lat, long, alt):
from math import cos, sin, pi
from numpy import array
phi = lat * pi / 180.
ksi = long * pi / 180.
x = alt * cos(phi) * cos(ksi)
y = alt * cos(phi) * sin (ksi)
z = alt * sin(phi)
return array([x, y, z])
# ----------------------------------------------------------------------
# https://en.wikipedia.org/wiki/Great-circle_distance#Vector_version
def distance(u, v):
from math import atan2
from numpy.linalg import norm
from numpy import cross, dot
return R_EARTH * atan2(norm(cross(u,v)), dot(u,v))
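# Illustrative sketch (added; not part of the original script): great-circle
# distance between Helsinki (60.17N, 24.94E) and Stockholm (59.33N, 18.07E),
# roughly 400 km.
def _demo_distance():
    hel = to_xyz(60.17, 24.94, 1.0)
    sto = to_xyz(59.33, 18.07, 1.0)
    return distance(hel, sto)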
#-------------------------------------------------------------------------
# draw a sphere with nodes
def visualize(nodes, start_point):
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
r = 0.99 # slight shrinkage for the unit sphere to better view the points
fig = plt.figure(figsize = (14, 14))
ax = fig.add_subplot(111, projection = '3d', aspect = 'equal')
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = r * np.outer(np.cos(u), np.sin(v))
y = r * np.outer(np.sin(u), np.sin(v))
z = r * np.outer(np.ones(np.size(u)), np.cos(v))
"""
This module (along with a few functions in :mod:`.helper_functions`) contains
everything that is needed to calculate instantons in one field dimension.
The primary class is :class:`SingleFieldInstanton`, which can calculate the
instanton solution in any number of spatial dimensions using the overshoot /
undershoot method. Additional classes inherit common functionality from this
one, and can be used to calculate the bubble wall profile with constant
friction (:class:`WallWithConstFriction`) instead of radius-dependent friction,
or to calculate the instanton in the presence of gravity (*not yet
implemented*).
.. todo::
Create and document a *CDL_Instanton* class for tunneling with gravity.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import optimize, integrate, special, interpolate
from collections import namedtuple
from . import helper_functions
from .helper_functions import rkqs, IntegrationError, clampVal
from .helper_functions import cubicInterpFunction
import sys
if sys.version_info >= (3,0):
xrange = range
class PotentialError(Exception):
"""
Used when the potential does not have the expected characteristics.
The error messages should be tuples, with the second item being one of
``("no barrier", "stable, not metastable")``.
"""
pass
class SingleFieldInstanton:
"""
This class will calculate properties of an instanton with a single scalar
Field without gravity using the overshoot/undershoot method.
Most users will probably be primarily interested in the functions
:func:`findProfile` and :func:`findAction`.
Note
----
When the bubble is thin-walled (due to nearly degenerate minima), an
approximate solution is found to the equations of motion and integration
starts close to the wall itself (instead of always starting at the center
of the bubble). This way the overshoot/undershoot method runs just as fast
for extremely thin-walled bubbles as it does for thick-walled bubbles.
Parameters
----------
phi_absMin : float
The field value at the stable vacuum to which the instanton
tunnels. Nowhere in the code is it *required* that there actually be a
minimum at `phi_absMin`, but the :func:`findProfile` function will only
use initial conditions between `phi_absMin` and `phi_metaMin`, and the
code is optimized for thin-walled bubbles when the center of the
instanton is close to `phi_absMin`.
phi_metaMin : float
The field value in the metastable vacuum.
V : callable
The potential function. It should take as its single parameter the field
value `phi`.
dV, d2V : callable, optional
The potential's first and second derivatives. If not None, these
override the methods :func:`dV` and :func:`d2V`.
phi_eps : float, optional
A small value used to calculate derivatives (if not overriden by
the user) and in the function :func:`dV_from_absMin`. The input should
be unitless; it is later rescaled by ``abs(phi_absMin - phi_metaMin)``.
alpha : int or float, optional
The coefficient for the friction term in the ODE. This is also
the number of spacetime dimensions minus 1.
phi_bar : float, optional
The field value at the edge of the barrier. If `None`, it is found by
:func:`findBarrierLocation`.
rscale : float, optional
The approximate radial scale of the instanton. If `None` it is found by
:func:`findRScale`.
Raises
------
PotentialError
when the barrier is non-existent or when the presumably stable minimum
has a higher energy that the metastable minimum.
Examples
--------
Thick and thin-walled bubbles:
.. plot::
:include-source:
from cosmoTransitions.tunneling1D import SingleFieldInstanton
import matplotlib.pyplot as plt
# Thin-walled
def V1(phi): return 0.25*phi**4 - 0.49*phi**3 + 0.235 * phi**2
def dV1(phi): return phi*(phi-.47)*(phi-1)
profile = SingleFieldInstanton(1.0, 0.0, V1, dV1).findProfile()
plt.plot(profile.R, profile.Phi)
# Thick-walled
def V2(phi): return 0.25*phi**4 - 0.4*phi**3 + 0.1 * phi**2
def dV2(phi): return phi*(phi-.2)*(phi-1)
profile = SingleFieldInstanton(1.0, 0.0, V2, dV2).findProfile()
plt.plot(profile.R, profile.Phi)
plt.xlabel(r"Radius $r$")
plt.ylabel(r"Field $\phi$")
plt.show()
"""
def __init__(self, phi_absMin, phi_metaMin, V,
dV=None, d2V=None, phi_eps=1e-3, alpha=2,
phi_bar=None, rscale=None):
self.phi_absMin, self.phi_metaMin = phi_absMin, phi_metaMin
self.V = V
if V(phi_metaMin) <= V(phi_absMin):
raise PotentialError("V(phi_metaMin) <= V(phi_absMin); "
"tunneling cannot occur.", "stable, not metastable")
if dV is not None:
self.dV = dV
if d2V is not None:
self.d2V = d2V
if phi_bar is None:
self.phi_bar = self.findBarrierLocation()
else:
self.phi_bar = phi_bar
if rscale is None:
self.rscale = self.findRScale()
else:
self.rscale = rscale
self.alpha = alpha
self.phi_eps = phi_eps * abs(phi_absMin - phi_metaMin)
def dV(self, phi):
R"""
Calculates `dV/dphi` using finite differences.
The finite difference is given by `self.phi_eps`, and the derivative
is calculated to fourth order.
"""
eps = self.phi_eps
V = self.V
return (V(phi-2*eps) - 8*V(phi-eps) + 8*V(phi+eps) - V(phi+2*eps)
) / (12.*eps)
def dV_from_absMin(self, delta_phi):
R"""
Calculates `dV/dphi` at ``phi = phi_absMin + delta_phi``.
It is sometimes helpful to find `dV/dphi` extremely close to the
minimum. In this case, floating-point error can be significant. To get
increased accuracy, this function expands about the minimum in
a Taylor series and uses that for nearby values. That is,
:math:`V'(\phi) \approx V''(\phi_{\rm absMin})(\phi-\phi_{\rm absMin})`.
For values that are farther away, it instead uses :func:`dV`.
It blends the two methods so that there are no numerical
discontinuities.
This uses `self.phi_eps` to determine whether the field is considered
nearby or not.
"""
phi = self.phi_absMin + delta_phi
dV = self.dV(phi)
# If phi is very close to phi_absMin, it should be safer to assume
# that dV is zero exactly at phi_absMin and instead calculate dV from
# d2V.
if self.phi_eps > 0:
dV_ = self.d2V(phi) * delta_phi
# blend the two together so that there are no discontinuites
blend_factor = np.exp(-(delta_phi/self.phi_eps)**2)
dV = dV_*blend_factor + dV*(1-blend_factor)
return dV
def d2V(self, phi):
R"""
Calculates `d^2V/dphi^2` using finite differences.
The finite difference is given by `self.phi_eps`, and the derivative
is calculated to fourth order.
"""
eps = self.phi_eps
V = self.V
return (-V(phi-2*eps) + 16*V(phi-eps) - 30*V(phi)
+ 16*V(phi+eps) - V(phi+2*eps)) / (12.*eps*eps)
def findBarrierLocation(self):
R"""
Find edge of the potential barrier.
Returns
-------
phi_barrier : float
The value such that `V(phi_barrier) = V(phi_metaMin)`
"""
phi_tol = abs(self.phi_metaMin - self.phi_absMin) * 1e-12
V_phimeta = self.V(self.phi_metaMin)
phi1 = self.phi_metaMin
phi2 = self.phi_absMin
phi0 = 0.5 * (phi1+phi2)
# Do a very simple binary search to narrow down on the right answer.
while abs(phi1-phi2) > phi_tol:
V0 = self.V(phi0)
if V0 > V_phimeta:
phi1 = phi0
else:
phi2 = phi0
phi0 = 0.5 * (phi1+phi2)
return phi0
def findRScale(self):
R"""
Find the characteristic length scale for tunneling over the potential
barrier.
The characteristic length scale should formally be given by the period
of oscillations about the top of the potential barrier. However, it is
perfectly acceptable for the potential barrier to have a flat top, in
which case a naive calculation of the length scale would be infinite.
Instead, this function finds the top of the barrier along with a cubic
function that has a maximum at the barrier top and a minimum at the
metastable minimum. The returned length scale is then the period of
oscillations about this cubic maximum.
Raises
------
PotentialError
when the barrier is non-existent.
"""
"""
NOT USED:
We could also do a sanity check in case the barrier goes to zero.
A second way of finding the scale is to see how long it would take
the field to roll from one minimum to the other if the potential were
purely linear and there were no friction.
Parameters
----------
second_check : float
If bigger than zero, do the sanity check. Return value is then the
larger of the first scale and the second scale times
`second_check`.
"""
phi_tol = abs(self.phi_bar - self.phi_metaMin) * 1e-6
x1 = min(self.phi_bar, self.phi_metaMin)
x2 = max(self.phi_bar, self.phi_metaMin)
phi_bar_top = optimize.fminbound(
lambda x: -self.V(x), x1, x2, xtol=phi_tol)
if phi_bar_top + phi_tol > x2 or phi_bar_top - phi_tol < x1:
raise PotentialError(
"Minimization is placing the top of the "
"potential barrier outside of the interval defined by "
"phi_bar and phi_metaMin. Assume that the barrier does not exist.",
"no barrier")
Vtop = self.V(phi_bar_top) - self.V(self.phi_metaMin)
xtop = phi_bar_top - self.phi_metaMin
# Cubic function given by (ignoring linear and constant terms):
# f(x) = C [(-1/3)x^3 + (1/2)x^2 xtop]
# C = 6 Vtop / xtop^3
# f''(xtop) = - C xtop
# d2V = -6*Vtop / xtop**2
# rscale = 1 / sqrt(d2V)
if Vtop <= 0:
raise PotentialError("Barrier height is not positive, "
"does not exist.", "no barrier")
rscale1 = abs(xtop) / np.sqrt(abs(6*Vtop))
return rscale1
# The following would calculate it a separate way, but this goes
# to infinity when delta_V goes to zero, so it's a bad way of doing it
delta_phi = abs(self.phi_absMin - self.phi_metaMin)
delta_V = abs(self.V(self.phi_absMin) - self.V(self.phi_metaMin))
rscale2 = np.sqrt(2*delta_phi**2 / (delta_V+1e-100))
return max(rscale1, rscale2)
_exactSolution_rval = namedtuple("exactSolution_rval", "phi dphi")
def exactSolution(self, r, phi0, dV, d2V):
R"""
Find `phi(r)` given `phi(r=0)`, assuming a quadratic potential.
Parameters
----------
r : float
The radius at which the solution should be calculated.
phi0 : float
The field at `r=0`.
dV, d2V : float
The potential's first and second derivatives evaluated at `phi0`.
Returns
-------
phi, dphi : float
The field and its derivative evaluated at `r`.
Notes
-----
If the potential at the point :math:`\phi_0` is a simple quadratic, the
solution to the instanton equation of motion can be determined exactly.
The non-singular solution to
.. math::
\frac{d^2\phi}{dr^2} + \frac{\alpha}{r}\frac{d\phi}{dr} =
V'(\phi_0) + V''(\phi_0) (\phi-\phi_0)
is
.. math::
\phi(r)-\phi_0 = \frac{V'}{V''}\left[
\Gamma(\nu+1)\left(\frac{\beta r}{2}\right)^{-\nu} I_\nu(\beta r) - 1
\right]
where :math:`\nu = \frac{\alpha-1}{2}`, :math:`I_\nu` is the modified
Bessel function, and :math:`\beta^2 = V''(\phi_0) > 0`. If instead
:math:`-\beta^2 = V''(\phi_0) < 0`, the solution is the same but with
:math:`I_\nu \rightarrow J_\nu`.
"""
beta = np.sqrt(abs(d2V))
beta_r = beta*r
nu = 0.5 * (self.alpha - 1)
gamma = special.gamma # Gamma function
iv, jv = special.iv, special.jv # (modified) Bessel function
if beta_r < 1e-2:
# Use a small-r approximation for the Bessel function.
s = +1 if d2V > 0 else -1
phi = 0.0
dphi = 0.0
for k in xrange(1,4):
_ = (0.5*beta_r)**(2*k-2) * s**k / (gamma(k+1)*gamma(k+1+nu))
phi += _
dphi += _ * (2*k)
phi *= 0.25 * gamma(nu+1) * r**2 * dV * s
dphi *= 0.25 * gamma(nu+1) * r * dV * s
phi += phi0
elif d2V > 0:
import warnings
# If beta_r is very large, this will throw off overflow and divide
# by zero errors in iv(). It will return np.inf though, which is
# what we want. Just ignore the warnings.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
phi = (gamma(nu+1)*(0.5*beta_r)**-nu * iv(nu, beta_r)-1) * dV/d2V
dphi = -nu*((0.5*beta_r)**-nu / r) * iv(nu, beta_r)
dphi += (0.5*beta_r)**-nu * 0.5*beta \
* (iv(nu-1, beta_r)+iv(nu+1, beta_r))
dphi *= gamma(nu+1) * dV/d2V
phi += phi0
else:
phi = (gamma(nu+1)*(0.5*beta_r)**-nu * jv(nu, beta_r) - 1) * dV/d2V
dphi = -nu*((0.5*beta_r)**-nu / r) * jv(nu, beta_r)
dphi += (0.5*beta_r)**-nu * 0.5*beta \
* (jv(nu-1, beta_r)-jv(nu+1, beta_r))
dphi *= gamma(nu+1) * dV/d2V
phi += phi0
return self._exactSolution_rval(phi, dphi)
_initialConditions_rval = namedtuple(
"initialConditions_rval", "r0 phi dphi")
def initialConditions(self, delta_phi0, rmin, delta_phi_cutoff):
R"""
Finds the initial conditions for integration.
The instanton equations of motion are singular at `r=0`, so we
need to start the integration at some larger radius. This
function finds the value `r0` such that `phi(r0) = phi_cutoff`.
If there is no such value, it returns the intial conditions at `rmin`.
Parameters
----------
delta_phi0 : float
`delta_phi0 = phi(r=0) - phi_absMin`
rmin : float
The smallest acceptable radius at which to start integration.
delta_phi_cutoff : float
The desired value for `phi(r0)`.
`delta_phi_cutoff = phi(r0) - phi_absMin`.
Returns
-------
r0, phi, dphi : float
The initial radius and the field and its derivative at that radius.
Notes
-----
The field values are calculated using :func:`exactSolution`.
"""
phi0 = self.phi_absMin + delta_phi0
dV = self.dV_from_absMin(delta_phi0)
d2V = self.d2V(phi0)
phi_r0, dphi_r0 = self.exactSolution(rmin, phi0, dV, d2V)
if abs(phi_r0 - self.phi_absMin) > abs(delta_phi_cutoff):
# The initial conditions at rmin work. Stop here.
return self._initialConditions_rval(rmin, phi_r0, dphi_r0)
if np.sign(dphi_r0) != np.sign(delta_phi0):
# The field is evolving in the wrong direction.
# Increasing r0 won't increase |delta_phi_r0|.
return rmin, phi_r0, dphi_r0
# Find the smallest r0 such that delta_phi_r0 > delta_phi_cutoff
r = rmin
while np.isfinite(r):
rlast = r
r *= 10
phi, dphi = self.exactSolution(r, phi0, dV, d2V)
if abs(phi - self.phi_absMin) > abs(delta_phi_cutoff):
break
# Now find where phi - self.phi_absMin = delta_phi_cutoff exactly
def deltaPhiDiff(r_):
p = self.exactSolution(r_, phi0, dV, d2V)[0]
return abs(p - self.phi_absMin) - abs(delta_phi_cutoff)
r0 = optimize.brentq(deltaPhiDiff, rlast, r, disp=False)
phi_r0, dphi_r0 = self.exactSolution(r0, phi0, dV, d2V)
return self._initialConditions_rval(r0, phi_r0, dphi_r0)
def equationOfMotion(self, y, r):
"""
Used to integrate the bubble wall.
"""
return np.array([y[1], self.dV(y[0])-self.alpha*y[1]/r])
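# (Added note) Here y = [phi, dphi/dr], so the returned vector is
# [dphi/dr, d2phi/dr2] with d2phi/dr2 = dV/dphi - (alpha/r) * dphi/dr,
# i.e. the bubble-wall equation of motion written as a first-order system.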
_integrateProfile_rval = namedtuple(
"integrateProfile_rval", "r y convergence_type")
def integrateProfile(self, r0, y0, dr0,
epsfrac, epsabs, drmin, rmax, *eqn_args):
R"""
Integrate the bubble wall equation:
.. math::
\frac{d^2\phi}{dr^2} + \frac{\alpha}{r}\frac{d\phi}{dr} =
\frac{dV}{d\phi}.
The integration will stop when it either overshoots or undershoots
the false vacuum minimum, or when it converges upon the false vacuum
minimum.
Parameters
----------
r0 : float
The starting radius for the integration.
y0 : array_like
The starting values [phi(r0), dphi(r0)].
dr0 : float
The starting integration stepsize.
epsfrac, epsabs : float
The error tolerances used for integration. This is fed into
:func:`helper_functions.rkqs` and is used to test for convergence.
drmin : float
The minimum allowed value of `dr` before raising an error.
rmax : float
The maximum allowed value of `r-r0` before raising an error.
eqn_args : tuple
Extra arguments to pass to :func:`equationOfMotion`. Useful for
subclasses.
Returns
-------
r : float
The final radius.
y : array_like
The final field values [phi, dphi]
convergence_type : str
Either 'overshoot', 'undershoot', or 'converged'.
Raises
------
helper_functions.IntegrationError
"""
dr = dr0
# dY is the ODE that we use
def dY(y,r,args=eqn_args):
return self.equationOfMotion(y,r,*args)
dydr0 = dY(y0, r0)
ysign = np.sign(y0[0] - self.phi_metaMin)
import dgl
import numpy as np
import torch as th
import torch.nn as nn
import dgl.function as fn
import torch.nn.functional as F
from . import BaseModel, register_model
from dgl.nn.functional import edge_softmax
import torch.nn.functional as F
@register_model('KGCN')
class KGCN(BaseModel):
r"""
Description
-----------
This module KGCN was introduced in `KGCN <https://dl.acm.org/doi/10.1145/3308558.3313417>`__.
It included two parts:
- Aggregate the entity representation and its neighborhood representation into the entity's embedding.
The message function is defined as follow:
:math:`\mathrm{v}_{\mathcal{N}(v)}^{u}=\sum_{e \in \mathcal{N}(v)} \tilde{\pi}_{r_{v, e}}^{u} \mathrm{e}`
where :math:`\mathrm{e}` is the representation of entity,
:math:`\tilde{\pi}_{r_{v, e}}^{u}` is the user-specific scalar weight on the edge between entity :math:`v` and entity :math:`e`,
the result :math:`\mathrm{v}_{\mathcal{N}(v)}^{u}` saves message which is passed from neighbor nodes
There are three types of aggregators.
Sum aggregator takes the summation of two representation vectors,
Concat aggregator concatenates the two representation vectors and
Neighbor aggregator directly takes the neighborhood representation of entity as the output representation
:math:`agg_{sum}=\sigma\left(\mathbf{W} \cdot\left(\mathrm{v}+\mathrm{v}_{\mathcal{S}(v)}^{u}\right)+\mathbf{b}\right)`
:math:`agg_{concat}=\sigma\left(\mathbf{W} \cdot \text{concat}\left(\mathrm{v}, \mathrm{v}_{\mathcal{S}(v)}^{u}\right)+\mathbf{b}\right)`
:math:`agg_{neighbor}=\sigma\left(\mathbf{W} \cdot \mathrm{v}_{\mathcal{S}(v)}^{u}+\mathbf{b}\right)`
In the above equations, :math:`\sigma` is the nonlinear activation function and
:math:`\mathbf{W}` and :math:`\mathbf{b}` are the transformation weight and bias.
The representation of an item is thus bound up with its neighbors by aggregation.
- Obtain scores using the final entity representation and the user representation
The final entity representation is denoted as :math:`\mathrm{v}^{u}`.
Taking the dot product of :math:`\mathrm{v}^{u}` with the user representation :math:`\mathrm{u}`
gives the predicted probability:
:math:`\hat{y}_{u v}=f\left(\mathbf{u}, \mathrm{v}^{u}\right)`
Parameters
----------
g : DGLGraph
A knowledge Graph preserves relationships between entities
args : Config
Model's config
"""
@classmethod
def build_model_from_args(cls, args, g):
return cls(g, args)
def __init__(self, g, args):
super(KGCN, self).__init__()
self.g = g
self.args = args
self.in_dim = args.in_dim
self.out_dim = args.out_dim
self.entity_emb_matrix = nn.Parameter(th.FloatTensor(self.g.num_nodes(), self.in_dim))
self.relation_emb_matrix = nn.Parameter(th.FloatTensor(args.n_relation, self.in_dim))
self.user_emb_matrix = nn.Parameter(th.FloatTensor(args.n_user, self.in_dim))
if self.args.aggregate == 'CONCAT':
self.agg = nn.Linear(self.in_dim*2, self.out_dim)
else:
self.agg = nn.Linear(self.in_dim, self.out_dim)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.entity_emb_matrix, -1, 1)
nn.init.uniform_(self.relation_emb_matrix, -1, 1)
nn.init.uniform_(self.user_emb_matrix, -1, 1)
def aggregate(self):
r"""
Description
-----------
Aggregate the entity representation and its neighborhood representation
Returns
-------
"""
self.sub_g.update_all(fn.u_mul_e('embedding', 'weight', 'm'),fn.sum('m', 'ft'))
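# (Added note) u_mul_e multiplies each neighbor entity embedding by its edge
# weight and fn.sum accumulates the messages into 'ft', i.e. this computes the
# neighborhood representation v_N(v)^u described in the class docstring.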
self.userList = []
self.labelList = []
embeddingList = []
for i in range(len(self.data)):
weightIndex = np.where(self.itemlist==int(self.sub_g.dstdata['_ID'][i]))
if self.args.aggregate == 'SUM':
embeddingList.append(self.sub_g.dstdata['embedding'][i] + self.sub_g.dstdata['ft'][i][weightIndex])
elif self.args.aggregate == 'CONCAT':
embeddingList.append(th.cat([self.sub_g.dstdata['embedding'][i], self.sub_g.dstdata['ft'][i][weightIndex].squeeze(0)],dim=-1))
elif self.args.aggregate == 'NEIGHBOR':
embeddingList.append(self.sub_g.dstdata['embedding'][i])
self.userList.append(int(self.user_indices[weightIndex]))
self.labelList.append(int(self.labels[weightIndex]))
self.sub_g.dstdata['embedding'] = th.stack(embeddingList).squeeze(1)
output = F.dropout(self.sub_g.dstdata['embedding'],p=0)
if self.layer+1 == len(self.blocks):
self.item_embeddings = th.tanh(self.agg(output))
else:
self.item_embeddings = th.relu(self.agg(output))
def get_score(self):
r"""
Description
-----------
Obtain scores using final entity representation and user representation
Returns
-------
"""
self.user_embeddings = self.user_emb_matrix[np.array(self.userList)]
"""
blends
~~~~~~
Blending operations to use when combining two sets of image data.
Many of these are taken from:
* http://www.deepskycolors.com/archive/2010/04/21/
formulas-for-Photoshop-blending-modes.html
* http://www.simplefilter.de/en/basics/mixmods.html
Basic Usage: Blends
===================
The blending operation functions (blends) are used to blend two sets
of image data together. Using a blending operation (an "operation")
works like any other function all. The parameters follow the Blending
Operation protocol.
Usage::
>>> import numpy as np
>>> a = np.array([[[0., .25, .5, .75, 1.], [0., .25, .5, .75, 1.]]])
>>> b = np.array([[[1., .75, .5, .25, 0.], [1., .75, .5, .25, 0.]]])
>>> darker(a, b)
array([[[0. , 0.25, 0.5 , 0.25, 0. ],
[0. , 0.25, 0.5 , 0.25, 0. ]]])
While the functions themselves are fairly simple, they are given some
extra functionality by decorators. Ultimately the true protocol for the
operations is:
:param a: The image data from the existing image.
:param b: The image data from the blending image.
:param colorize: (Optional). Whether to ensure the two images have
the same number of color channels.
:param fade: (Optional.) (From @can_fade.) How much the blend
should impact the final output. This is a percentage, so the
range of valid values are 0 <= x <= 1.
:param mask: (Optional.) (From @can_mask.) An array of data used
to mask the blending operation. This is also a percentage, so a
value of one in the mask means that pixel is fully affected by
the operation. A value of zero means the pixel is not affected
by the operation.
:return: A :class:numpy.ndarray object.
:rtype: numpy.ndarray
Colorize, Array Shape, and Color Channels
=========================================
The blends themselves don't care about the dimensionality of the given
arrays. It just needs the two arrays to have the same shape by the
time it does the blend. While this was originally written for image
data, to the algorithms themselves, it's all just floating-point math.
However, there is one case where a bias towards image data shows up:
* You pass two arrays with differing shapes.
* The size of their last dimension is different.
* One of the two arrays has a last dimension with size three.
To perform the blending algorithm, the two arrays must be the same
shape. In most cases, differences between the two shapes will be
handled through the ```will_match_size``` decorator, which adds zeros
to the smaller array to make their sizes match. However, in the case
described above, something different happens.
Since color image data often has a last dimension size of three,
representing color channels, the case above is intercepted by the
```will_colorize``` decorator. That decorator assumes the array
that doesn't have a last dimension size of three is single channel
image data ("grayscale") and will add a new last dimension of size
three. The values will be the original single value repeated three
times. To demonstrate::
>>> from imgblender.common import will_colorize
>>> a = np.array([
... [1.0, 0.5, 0.0, ],
... [0.5, 0.0, 0.5, ],
... [0.0, 0.5, 1.0, ],
... ])
>>> b = np.array([
... [[0, 0, 0], [0, 0, 0], [0, 0, 0], ],
... [[0, 0, 0], [0, 0, 0], [0, 0, 0], ],
... [[0, 0, 0], [0, 0, 0], [0, 0, 0], ],
... ])
>>>
>>> @will_colorize
... def spam(a, b):
... return a
...
>>> a_ = spam(a, b)
>>> a_
array([[[1. , 1. , 1. ],
[0.5, 0.5, 0.5],
[0. , 0. , 0. ]],
<BLANKLINE>
[[0.5, 0.5, 0.5],
[0. , 0. , 0. ],
[0.5, 0.5, 0.5]],
<BLANKLINE>
[[0. , 0. , 0. ],
[0.5, 0.5, 0.5],
[1. , 1. , 1. ]]])
>>> a.shape
(3, 3)
>>> a_.shape
(3, 3, 3)
The value of a returned by ```spam()``` in the demonstration has an
extra dimension of size three added, and the values are three copies
of the values in the original a.
This can be turned off by passing ```False``` ro the ```colorize```
parameter of the blend.
"""
import numpy as np
from imgblender.common import (can_fade, can_mask, will_clip,
will_colorize, will_match_size)
# Simple replacement blends.
@can_mask
@can_fade
@will_match_size
@will_colorize
def replace(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Simple replacement filter. Can double as an opacity filter
if passed a fade amount, but otherwise this will just replace the
values in a with the values in b.
:param a: The existing values. This is like the bottom layer in
a photo editing tool.
:param b: The values to blend. This is like the top layer in a
photo editing tool.
:param colorize: (Optional). Whether to ensure the two images have
the same number of color channels.
:param fade: (Optional.) The amount the blended values should
affect the existing values. This is a float between zero
and one, where zero is no effect and one is full effect.
See common.can_fade for more details.
:param mask: (Optional.) An image mask that is used to determine
the effect the blend should have on the existing values.
This is a :class:numpy.ndarray of floats between zero and
one, where zero is no effect and one is full effect. See
common.can_mask for details.
:return: An array that contains the values of the blended arrays.
:rtype: np.ndarray
"""
return b
# Darker/burn blends.
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def darker(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Replaces values in the existing image with values from the
blending image when the value in the blending image is darker.
:param a: The existing values. This is like the bottom layer in
a photo editing tool.
:param b: The values to blend. This is like the top layer in a
photo editing tool.
:param colorize: (Optional). Whether to ensure the two images have
the same number of color channels.
:param fade: (Optional.) The amount the blended values should
affect the existing values. This is a float between zero
and one, where zero is no effect and one is full effect.
See common.can_fade for more details.
:param mask: (Optional.) An image mask that is used to determine
the effect the blend should have on the existing values.
This is a :class:numpy.ndarray of floats between zero and
one, where zero is no effect and one is full effect. See
common.can_mask for details.
:return: An array that contains the values of the blended arrays.
:rtype: np.ndarray
"""
ab = a.copy()
ab[b < a] = b[b < a]
return ab
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def multiply(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Multiplies the values of the two images, leading to darker
values. This is useful for shadows and similar situations.
:param a: The existing values. This is like the bottom layer in
a photo editing tool.
:param b: The values to blend. This is like the top layer in a
photo editing tool.
:param colorize: (Optional). Whether to ensure the two images have
the same number of color channels.
:param fade: (Optional.) The amount the blended values should
affect the existing values. This is a float between zero
and one, where zero is no effect and one is full effect.
See common.can_fade for more details.
:param mask: (Optional.) An image mask that is used to determine
the effect the blend should have on the existing values.
This is a :class:numpy.ndarray of floats between zero and
one, where zero is no effect and one is full effect. See
common.can_mask for details.
:return: An array that contains the values of the blended arrays.
:rtype: np.ndarray
"""
return a * b
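# Worked example (added for clarity): for mid-grey inputs a = b = 0.5,
# multiply gives 0.5 * 0.5 = 0.25, i.e. a darker result than either input.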
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def color_burn(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Similar to multiply, but is darker and produces higher
contrast.
:param a: The existing values. This is like the bottom layer in
a photo editing tool.
:param b: The values to blend. This is like the top layer in a
photo editing tool.
:param colorize: (Optional). Whether to ensure the two images have
the same number of color channels.
:param fade: (Optional.) The amount the blended values should
affect the existing values. This is a float between zero
and one, where zero is no effect and one is full effect.
See common.can_fade for more details.
:param mask: (Optional.) An image mask that is used to determine
the effect the blend should have on the existing values.
This is a :class:numpy.ndarray of floats between zero and
one, where zero is no effect and one is full effect. See
common.can_mask for details.
:return: An array that contains the values of the blended arrays.
:rtype: np.ndarray
"""
m = b != 0
ab = np.zeros_like(a)
ab[m] = 1 - (1 - a[m]) / b[m]
ab[~m] = 0
return ab
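# Worked example (added for clarity): for a = 0.25 and b = 0.5, color burn
# gives 1 - (1 - 0.25) / 0.5 = -0.5, which will_clip then clips up to 0.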
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def linear_burn(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Similar to multiply, but is darker, produces less saturated
colors than color burn, and produces more contrast in the shadows.
:param a: The existing values. This is like the bottom layer in
a photo editing tool.
:param b: The values to blend. This is like the top layer in a
photo editing tool.
:param colorize: (Optional). Whether to ensure the two images have
the same number of color channels.
:param fade: (Optional.) The amount the blended values should
affect the existing values. This is a float between zero
and one, where zero is no effect and one is full effect.
See common.can_fade for more details.
:param mask: (Optional.) An image mask that is used to determine
the effect the blend should have on the existing values.
This is a :class:numpy.ndarray of floats between zero and
one, where zero is no effect and one is full effect. See
common.can_mask for details.
:return: An array that contains the values of the blended arrays.
:rtype: np.ndarray
"""
return a + b - 1
# Lighter/dodge blends.
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def lighter(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Replaces values in the existing image with values from the
blending image when the value in the blending image is lighter.
:param a: The existing values. This is like the bottom layer in
a photo editing tool.
:param b: The values to blend. This is like the top layer in a
photo editing tool.
:param colorize: (Optional). Whether to ensure the two images have
the same number of color channels.
:param fade: (Optional.) The amount the blended values should
affect the existing values. This is a float between zero
and one, where zero is no effect and one is full effect.
See common.can_fade for more details.
:param mask: (Optional.) An image mask that is used to determine
the effect the blend should have on the existing values.
This is a :class:numpy.ndarray of floats between zero and
one, where zero is no effect and one is full effect. See
common.can_mask for details.
:return: An array that contains the values of the blended arrays.
:rtype: np.ndarray
"""
ab = a.copy()
ab[b > a] = b[b > a]
return ab
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def screen(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Performs an inverse multiplication on the colors from the two
images then inverse the colors again. This leads to overall
brighter colors and is the opposite of multiply.
:param a: The existing values. This is like the bottom layer in
a photo editing tool.
:param b: The values to blend. This is like the top layer in a
photo editing tool.
:param colorize: (Optional). Whether to ensure the two images have
the same number of color channels.
:param fade: (Optional.) The amount the blended values should
affect the existing values. This is a float between zero
and one, where zero is no effect and one is full effect.
See common.can_fade for more details.
:param mask: (Optional.) An image mask that is used to determine
the effect the blend should have on the existing values.
This is a :class:numpy.ndarray of floats between zero and
one, where zero is no effect and one is full effect. See
common.can_mask for details.
:return: An array that contains the values of the blended arrays.
:rtype: np.ndarray
"""
rev_a = 1 - a
rev_b = 1 - b
ab = rev_a * rev_b
return 1 - ab
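# Worked example (added for clarity): for mid-grey inputs a = b = 0.5, screen
# gives 1 - (1 - 0.5) * (1 - 0.5) = 0.75, the brighter counterpart of multiply.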
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def color_dodge(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Similar to screen, but brighter and decreases the contrast.
:param a: The existing values. This is like the bottom layer in
a photo editing tool.
:param b: The values to blend. This is like the top layer in a
photo editing tool.
:param colorize: (Optional). Whether to ensure the two images have
the same number of color channels.
:param fade: (Optional.) The amount the blended values should
affect the existing values. This is a float between zero
and one, where zero is no effect and one is full effect.
See common.can_fade for more details.
:param mask: (Optional.) An image mask that is used to determine
the effect the blend should have on the existing values.
This is a :class:numpy.ndarray of floats between zero and
one, where zero is no effect and one is full effect. See
common.can_mask for details.
:return: An array that contains the values of the blended arrays.
:rtype: np.ndarray
"""
ab = np.ones_like(a)
ab[b != 1] = a[b != 1] / (1 - b[b != 1])
return ab
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def linear_dodge(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Similar to screen but produces stronger results.
:param a: The existing values. This is like the bottom layer in
a photo editing tool.
:param b: The values to blend. This is like the top layer in a
photo editing tool.
:param colorize: (Optional). Whether to ensure the two images have
the same number of color channels.
:param fade: (Optional.) The amount the blended values should
affect the existing values. This is a float between zero
and one, where zero is no effect and one is full effect.
See common.can_fade for more details.
:param mask: (Optional.) An image mask that is used to determine
the effect the blend should have on the existing values.
This is a :class:numpy.ndarray of floats between zero and
one, where zero is no effect and one is full effect. See
common.can_mask for details.
:return: An array that contains the values of the blended arrays.
:rtype: np.ndarray
"""
return a + b
# Inversion blends.
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def difference(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Takes the absolute value of the difference of the two values.
This is often useful in creating complex patterns or when
aligning two images.
:param a: The existing values. This is like the bottom layer in
a photo editing tool.
:param b: The values to blend. This is like the top layer in a
photo editing tool.
:param colorize: (Optional). Whether to ensure the two images have
the same number of color channels.
:param fade: (Optional.) The amount the blended values should
affect the existing values. This is a float between zero
and one, where zero is no effect and one is full effect.
See common.can_fade for more details.
:param mask: (Optional.) An image mask that is used to determine
the effect the blend should have on the existing values.
This is a :class:numpy.ndarray of floats between zero and
one, where zero is no effect and one is full effect. See
common.can_mask for details.
:return: An array that contains the values of the blended arrays.
:rtype: np.ndarray
"""
return | np.abs(a - b) | numpy.abs |
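# Hedged usage sketch (added for illustration; not part of the original
# module). The `fade` and `mask` keyword arguments are the ones documented in
# the docstrings above; image values are assumed to be floats in [0, 1], and
# the mask is assumed to share the images' shape.
if __name__ == '__main__':
    _a = np.full((8, 8, 3), 0.25)
    _b = np.full((8, 8, 3), 0.75)
    _shadow = multiply(_a, _b, fade=0.5)               # half-strength multiply
    _bright = screen(_a, _b, mask=np.ones((8, 8, 3)))  # full-effect mask
    print(float(_shadow.mean()), float(_bright.mean()))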
# -*- coding: utf-8 -*-
"""
This script creates the input files used for testing BELLA
"""
__version__ = '1.0'
__author__ = '<NAME>'
import sys
import pandas as pd
import numpy as np
import numpy.matlib
sys.path.append(r'C:\BELLA')
from src.CLA.lampam_functions import calc_lampam
from src.BELLA.panels import Panel
from src.BELLA.multipanels import MultiPanel
from src.BELLA.constraints import Constraints
from src.BELLA.obj_function import ObjFunction
from src.BELLA.materials import Material
from src.BELLA.format_pdl import convert_sst_to_ss
from src.BELLA.save_set_up import save_constraints_BELLA
from src.BELLA.save_set_up import save_multipanel
from src.BELLA.save_set_up import save_objective_function_BELLA
from src.BELLA.save_set_up import save_materials
from src.guidelines.one_stack import check_lay_up_rules
from src.guidelines.one_stack import check_ply_drop_rules
from src.divers.excel import delete_file, autofit_column_widths
sheet = 'SST-40-60'
sheet = 'SST-80-120'
sheet = 'SST-120-180'
filename_input = '/BELLA/input-files/SST.xlsx'
filename_res = 'input_file_' + sheet + '.xlsx'
# check for authorisation before overwriting
delete_file(filename_res)
# number of panels
if sheet == 'SST-40-60': n_panels = 6
elif sheet == 'SST-80-120': n_panels = 11
elif sheet == 'SST-120-180': n_panels = 16
### Design guidelines ---------------------------------------------------------
constraints_set = 'C0'
constraints_set = 'C1'
## lay-up rules
# set of admissible fibre orientations
set_of_angles = np.array([-45, 0, 45, 90], dtype=int)
set_of_angles = np.array([
-45, 0, 45, 90, +30, -30, +60, -60, 15, -15, 75, -75], dtype=int)
sym = True # symmetry rule
oopo = False # out-of-plane orthotropy requirements
if constraints_set == 'C0':
bal = False # balance rule
rule_10_percent = False # 10% rule
diso = False # disorientation rule
contig = False # contiguity rule
dam_tol = False # damage-tolerance rule
else:
bal = True
rule_10_percent = True
diso = True
contig = True
dam_tol = True
rule_10_Abdalla = True # 10% rule restricting LPs instead of ply percentages
percent_Abdalla = 10 # percentage limit for the 10% rule applied on LPs
combine_45_135 = True # True if restriction on +-45 plies combined for 10% rule
percent_0 = 10 # percentage used in the 10% rule for 0 deg plies
percent_45 = 0 # percentage used in the 10% rule for +45 deg plies
percent_90 = 10 # percentage used in the 10% rule for 90 deg plies
percent_135 = 0 # percentage used in the 10% rule for -45 deg plies
percent_45_135 = 10 # percentage used in the 10% rule for +-45 deg plies
delta_angle = 45 # maximum angle difference for adjacent plies
n_contig = 5 # maximum number of adjacent plies with the same fibre orientation
dam_tol_rule = 1 # type of damage tolerance rule
## ply-drop rules
covering = True # covering rule
n_covering = 1 # number of plies ruled by covering rule at laminate surfaces
pdl_spacing = True # ply drop spacing rule
min_drop = 2 # Minimum number of continuous plies between ply drops
constraints = Constraints(
sym=sym,
bal=bal,
oopo=oopo,
dam_tol=dam_tol,
dam_tol_rule=dam_tol_rule,
covering=covering,
n_covering=n_covering,
rule_10_percent=rule_10_percent,
rule_10_Abdalla=rule_10_Abdalla,
percent_Abdalla=percent_Abdalla,
percent_0=percent_0,
percent_45=percent_45,
percent_90=percent_90,
percent_135=percent_135,
percent_45_135=percent_45_135,
combine_45_135=combine_45_135,
diso=diso,
contig=contig,
n_contig=n_contig,
delta_angle=delta_angle,
set_of_angles=set_of_angles,
min_drop=min_drop,
pdl_spacing=pdl_spacing)
### Objective function parameters ---------------------------------------------
# Coefficient for the 10% rule penalty
coeff_10 = 1
# Coefficient for the contiguity constraint penalty
coeff_contig = 1
# Coefficient for the disorientation constraint penalty
coeff_diso = 10
# Coefficient for the out-of-plane orthotropy penalty
coeff_oopo = 1
# Coefficient for the ply drop spacing guideline penalty
coeff_spacing = 1
# Lamination-parameter weightings in panel objective functions
# (In practice these weightings can be different for each panel)
optimisation_type = 'AD'
if optimisation_type == 'A':
if all(elem in {0, +45, -45, 90} for elem in constraints.set_of_angles):
lampam_weightings = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
else:
lampam_weightings = np.array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
elif optimisation_type == 'D':
if all(elem in {0, +45, -45, 90} for elem in constraints.set_of_angles):
lampam_weightings = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0])
else:
lampam_weightings = | np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1]) | numpy.array |
import gc
import six
import numpy as np
from abc import ABCMeta, abstractmethod
from .base import BaseEstimator
class BaseSVM(six.with_metaclass(ABCMeta, BaseEstimator)):
def __init__(self, kernel='linear', degree=3, C=1.0, epsilon=1e-3, max_iter=100):
self.max_iter = max_iter
self._kernel = kernel
self.degree = degree
self.C = C
self.epsilon = epsilon
# explicitly initialize variables
def init(self, X: np.ndarray, y: np.ndarray):
self.m, self.n = X.shape
self.X = X
self.y = y
self.b = 0.0
self.K = self.kernel_mat(X, X) # the fully precomputed kernel matrix
self.alpha = np.zeros(self.m)
self.g_v = self.g_vec()
self.E = self.g_v - self.y # store the Ei values in an array
# g(x), for input xi (X[i])
def g(self, i):
in_sigma = self.alpha * self.y * self.K[i]
self.g_v[i] = np.sum(in_sigma) + self.b
return self.g_v[i]
# names ending in vec are functions; names ending in v are cached vectors
def g_vec(self):
before_sigma = self.K * self.alpha * self.y
return np.sum(before_sigma, axis=-1) + self.b
# E(x) is the difference between the prediction g(x) for input x and y
def _e(self, i):
return self.g(i) - self.y[i]
# kernel function
def kernel(self, x1: np.ndarray, x2: np.ndarray):
if self._kernel == 'linear':
return np.sum(x1 * x2)
elif self._kernel == 'poly':
return (np.sum(x1 * x2) + 1) ** self.degree
return 0
# vectorized kernel: evaluate xi against all samples in one call
def kernel_vec(self, x: np.ndarray): # todo: add more kernels
if self._kernel == 'linear':
return np.sum(self.X * x, axis=-1)
elif self._kernel == 'poly':
return (np.sum(self.X * x, axis=-1) + 1) ** self.degree
return None
# compute the whole kernel matrix up front so it can be looked up directly later
def kernel_mat(self, X1: np.ndarray, X2: np.ndarray):
x1 = X1[np.newaxis, ...]
x2 = X2[:, np.newaxis, :]
if self._kernel == 'linear':
return np.sum(x1 * x2, axis=-1) # broadcasting; sum over the last dimension
elif self._kernel == 'poly':
return (np.sum(x1 * x2, axis=-1) + 1) ** self.degree
return None
# select α1 and α2, returning their indices
def select_alpha(self):
_a = self.alpha
# build the boolean masks
con1, con2 = (_a > 0), (_a < self.C)
# yi*g(xi)
ygx = self.y * self.g_v
# KKT condition: αi == 0 and yi*gxi ≥ 1-ε
err1 = ygx - 1 + self.epsilon
err1[(con1 & (err1 <= 0)) | (~con1 & (err1 > 0))] = 0 # zero out entries outside this case or already satisfying it
# 0 < αi < C and abs(yi*gxi - 1) ≤ ε
err2 = np.abs(ygx - 1) - self.epsilon
err2[~con1 | ~con2] = 0 # zero out entries with αi ≤ 0 or αi ≥ C
# αi == C and yi*gxi ≤ 1+ε
err3 = ygx - 1 - self.epsilon
err3[(con2 & (err3 >= 0)) | (~con2 & (err3 < 0))] = 0
# compute the total error and rank to obtain the index
err = err1 ** 2 + err2 ** 2 + err3 ** 2
# α1 is the point that violates the KKT conditions most severely
i1 = | np.argmax(err) | numpy.argmax |
import matplotlib.pyplot as plt
from matplotlib import style
style.use("ggplot")
import numpy as np
X = | np.array([[1,2],[1.5, 1.8],[5, 8],[8,8],[1, 0.6],[9,11]]) | numpy.array |
import pandas as pd
import os
import re
import numpy as np
import pprint
import logging
'''
@Author: <NAME>
This script extracts voting members using the minutes of FOMC meetings, and then appends a manual verification for certain values.
'''
def main():
voter_df = get_voters()
get_errors(voter_df)
merge_error_correction(voter_df)
#merge_voting_members_with_alternatives()
def get_voters():
df = pd.read_excel("../data/fomc_dissents_data.xlsx",skiprows=3)
df["Date"] = df["FOMC Meeting"].apply(lambda x:str(x).split(" ")[0])
df['FOMC Votes'] = df['FOMC Votes'].apply(lambda x:0 if np.isnan(x) else x)
df['date'] = pd.to_datetime(df["Date"])
df['start_date'] = df['date'] - pd.Timedelta('1 days')
df['start_date']=df['start_date'].dt.date
df['date']=df['date'].dt.date
df[['date','start_date']].head()
voter_df = pd.DataFrame()
for index,row in df.iterrows():
voters = []
num_voters = int(row['FOMC Votes'])
date_path = '../../../collection/python/data/transcript_raw_text/{}.txt'.format(row['Date'])
if not os.path.exists(date_path):
print("Date not found")
date_path = '../../../collection/python/data/transcript_raw_text/{}.txt'.format(row['start_date'])
if not os.path.exists(date_path):
print("Alternative date not found")
continue
else:
print('Process alternative date')
with open(date_path) as f:
broken = False
broken_starts = 0
broken_ends = 0
lines = f.readlines()
'''First Check For Broken Title'''
#print("CHECKING USING FRAGMENT HEURISTIC")
for line in lines[:200]:
if line.strip():
if broken_ends==0:
title_frag = re.match(r'^(?:PRESENT: |PRESENT. )?(?:Mr.|Ms.|Mt.|Mrs. )$',line.strip())
if title_frag:
if not broken:
broken = True
#print("Broken Begining")
#print(title_frag.group(0))
title_frag_string = str(title_frag.group(0)).replace("PRESENT: ","")
voters.append(title_frag_string)
broken_starts+=1
continue
if broken and broken_ends<len(voters):
name_fragment = re.match('^[A-Z][a-z][A-Za-z]*',line.strip())
if name_fragment:
voters[broken_ends] = voters[broken_ends]+" "+str(name_fragment.group(0))
broken_ends+=1
'''Check using Mr. Regex'''
if len(voters)==0:
#print("CHECKING TITLE REGEX")
for line in lines[:200]:
'''Then check for valid input'''
voter_line = re.findall(r'(?:Mr.|Ms.|Mrs.) [A-Z][a-zA-Z]*',line.strip())
if voter_line:
#print(voter_line)
voters.append(voter_line[0])
if len(voters)>=num_voters:
break
'''Check Last Name Regex'''
if len(voters) == 0:
#print("Checking POST-PRESENT-NAME HEURISTIC")
found_present = False
for line in lines[:200]:
if "PRESENT:" in line.strip() or "PRESENT." in line.strip():
found_present = True
present_line = line.split(",")[0].strip().replace("PRESENT","")
name_text = re.match('[A-Z][a-z]*\s?(?:[A-Z][a-z]*)?',present_line)
if name_text:
voters.append(name_text.group(0))
continue
if found_present:
#print(line)
name_text = re.match('[A-Z][a-z]*\s?(?:[A-Z][a-z]*)?',line.split(",")[0].strip())
if name_text:
voters.append(name_text.group(0))
if len(voters)>=num_voters:
break
#print('Date:{}'.format(row['Date']))
#print("Broken Status:{}".format(broken))
#print("Voter Number:{}".format(num_voters))
#print("Voters Found:{}".format(len(voters)))
#pprint.pprint(voters)
voter_df = voter_df.append({
"Date":row['FOMC Meeting'],
"voters_expected":num_voters,
"voters_observed":len(voters),
"Voters":voters if num_voters==len(voters) else None,
},ignore_index=True)
#print("="*50)
print(voter_df)
return voter_df
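# Hedged illustration (added; not in the original script): the honorific regex
# used above matches "Mr./Ms./Mrs. Surname" fragments in a minutes line.
_SAMPLE_LINE = "Mr. Greenspan, Chairman"
_SAMPLE_MATCH = re.findall(r'(?:Mr.|Ms.|Mrs.) [A-Z][a-zA-Z]*', _SAMPLE_LINE)
# _SAMPLE_MATCH == ['Mr. Greenspan']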
def get_errors(voter_df):
print(len(voter_df[voter_df["Voters"].isna()]))
voter_errors = voter_df[voter_df["Voters"].isna()].reset_index(drop=True)
voter_errors.to_csv("../output/voter_errors.csv",index=False)
def merge_error_correction(voter_df):
correction_df = pd.read_csv("../data/voter_corrections.csv")
correction_df['Date'] = pd.to_datetime(correction_df['Date'])
voter_df['Date'] = pd.to_datetime(voter_df['Date'])
voter_df = pd.concat([voter_df,correction_df])
voter_df = voter_df.drop_duplicates(['Date'], keep="last").sort_values(by="Date")
voter_df = voter_df[(voter_df['Date'].dt.year>1987)&(voter_df['Date'].dt.year<2010)]
voter_df.to_csv("../output/voting_members.csv",index=False)
def merge_voting_members_with_alternatives():
voting_df = pd.read_csv("../output/voting_members.csv")
alt_df = pd.read_csv("../output/alternative_outcomes_and_corpus.csv")
alt_df['date'] = pd.to_datetime(alt_df['date']).dt.date
merge_df = pd.merge(alt_df,voting_df,left_on="date",right_on="Date",how="outer")
excel_df = pd.read_excel("../data/fomc_dissents_data.xlsx",skiprows=3)
excel_df['FOMC Votes'] = excel_df['FOMC Votes'].apply(lambda x:0 if | np.isnan(x) | numpy.isnan |
"""
decoding_analys.py
This script contains functions for decoding analysis.
Authors: <NAME>
Date: January, 2021
Note: this code uses python 3.7.
"""
import itertools
import logging
import numpy as np
import pandas as pd
import scipy.stats as scist
from util import logger_util, gen_util, logreg_util, math_util, rand_util
from sess_util import sess_gen_util
from analysis import misc_analys
logger = logging.getLogger(__name__)
MAX_SIMULT_RUNS = 25000
TAB = " "
#############################################
def get_decoding_data(sess, analyspar, stimpar, comp="Dori", ctrl=False):
"""
get_decoding_data(sess, analyspar, stimpar)
Retrieves data for decoding.
Required args:
- sess (Session):
Session object
- analyspar (AnalysPar):
named tuple containing analysis parameters
- stimpar (StimPar):
named tuple containing stimulus parameters
Optional args:
- comp (str):
comparison used to define classes ("Dori" or "Eori")
default: "Dori"
- ctrl (bool):
if True, the number of examples per class for the unexpected data
is returned (applies to "Dori" comp only).
default: False
Returns:
- all_input_data (3D array):
input data, dims: seq x frames x ROIs
- all_target_data (1D array):
class target for each input sequence
- ctrl_ns (list):
number of examples per class for the unexpected data
(None if it doesn't apply)
"""
if stimpar.stimtype != "gabors":
raise ValueError("Expected stimpar.stimtype to be 'gabors'.")
if comp == "Dori":
unexp = 0
ctrl_ns = []
elif comp == "Uori":
unexp = 1
ctrl = False
ctrl_ns = False
else:
gen_util.accepted_values_error("comp", comp, ["Dori", "Uori"])
gab_oris = sess_gen_util.filter_gab_oris(comp[0], stimpar.gab_ori)
stim = sess.get_stim(stimpar.stimtype)
all_input_data = []
all_target_data = []
for g, gab_ori in enumerate(gab_oris):
segs = stim.get_segs_by_criteria(
gabfr=stimpar.gabfr, gabk=stimpar.gabk, gab_ori=gab_ori,
unexp=unexp, remconsec=False, by="seg")
fr_ns = stim.get_fr_by_seg(
segs, start=True, fr_type="twop"
)["start_frame_twop"]
# sample as many sequences as are usable for unexpected data
if ctrl:
ctrl_gab_ori = sess_gen_util.get_unexp_gab_ori(gab_ori)
segs_ctrl = stim.get_segs_by_criteria(
gabfr=stimpar.gabfr, gabk=stimpar.gabk, gab_ori=ctrl_gab_ori,
unexp=1, remconsec=False, by="seg")
fr_ns_ctrl = stim.get_fr_by_seg(
segs_ctrl, start=True, ch_fl=[stimpar.pre, stimpar.post],
fr_type="twop"
)["start_frame_twop"]
ctrl_ns.append(len(fr_ns_ctrl))
ori_data_df = stim.get_roi_data(
fr_ns, stimpar.pre, stimpar.post, rem_bad=analyspar.rem_bad,
scale=analyspar.scale
)
# seq x frames x ROIs
ori_data = np.transpose(
gen_util.reshape_df_data(ori_data_df, squeeze_cols=True),
[1, 2, 0]
)
all_input_data.append(ori_data)
all_target_data.append(np.full(len(ori_data), g))
all_input_data = np.concatenate(all_input_data, axis=0)
all_target_data = np.concatenate(all_target_data)
return all_input_data, all_target_data, ctrl_ns
#############################################
def get_df_stats(scores_df, analyspar):
"""
get_df_stats(scores_df, analyspar)
Returns statistics (mean/median and error) for each data column.
Required args:
- scores_df (pd.DataFrame):
dataframe where each column contains data for which statistics
should be measured
- analyspar (AnalysPar):
named tuple containing analysis parameters
Returns:
- stats_df (pd.DataFrame):
dataframe with only one data row containing data stats for each
original column under "{col}_stat" and "{col}_err"
"""
# take statistics
stats_df = pd.DataFrame()
for col in scores_df.columns:
# get stats
stat = math_util.mean_med(
scores_df[col].to_numpy(), stats=analyspar.stats,
nanpol="omit"
)
err = math_util.error_stat(
scores_df[col].to_numpy(), stats=analyspar.stats,
error=analyspar.error, nanpol="omit"
)
if isinstance(err, np.ndarray):
err = err.tolist()
stats_df = gen_util.set_object_columns(
stats_df, [f"{col}_err"], in_place=True
)
stats_df.loc[0, f"{col}_stat"] = stat
stats_df.at[0, f"{col}_err"] = err
return stats_df
#############################################
def add_CI_p_vals(shuffle_df, stats_data_df, permpar):
"""
add_CI_p_vals(shuffle_df, stats_data_df, permpar)
Returns confidence intervals from shuffled data, and p-values for real data.
Required args:
- shuffle_df (pd.DataFrame):
dataframe where each row contains data for different data
shuffles, and each column contains data to use to construct null
distributions.
- stats_data_df (pd.DataFrame):
dataframe with only one data row containing real data stats for
each shuffle_df column. Columns should have the same names as
shuffle_df, as "{col}_stat" and "{col}_err".
- permpar (PermPar):
named tuple containing permutation parameters
Returns:
- stats_df (pd.DataFrame):
dataframe with only one data row containing real data stats,
shuffled data stats, and p-values for real data test set results.
"""
if len(stats_data_df) != 1:
raise ValueError("Expected stats_data_df to have length 1.")
multcomp = 1 if not permpar.multcomp else permpar.multcomp
p_thresh_corr = permpar.p_val / multcomp
percs = math_util.get_percentiles(
CI=(1 - p_thresh_corr), tails=permpar.tails
)[0]
percs = [percs[0], 50, percs[1]]
stats_df = pd.DataFrame()
for col in shuffle_df.columns:
# add real data
stat_key = f"{col}_stat"
err_key = f"{col}_err"
if (stat_key not in stats_data_df.columns or
err_key not in stats_data_df.columns):
raise KeyError(
f"{stat_key} and {err_key} not found stats_data_df."
)
stats_df[stat_key] = stats_data_df[stat_key]
stats_df[err_key] = stats_data_df[err_key]
# get and add null CI data
shuffle_data = shuffle_df[col].to_numpy()
shuffle_data = shuffle_data[~np.isnan(shuffle_data)] # remove NaN data
rand_util.check_n_rand(len(shuffle_data), p_thresh_corr)
null_CI = [np.percentile(shuffle_data, p) for p in percs]
null_key = f"{col}_null_CIs"
stats_df = gen_util.set_object_columns(
stats_df, [null_key], in_place=True
)
stats_df.at[0, null_key] = null_CI
# get and add p-value
if "test" in col:
perc = scist.percentileofscore(
shuffle_data, stats_data_df.loc[0, stat_key], kind='mean'
)
if perc > 50:
perc = 100 - perc
p_val = perc / 100
stats_df.loc[0, f"{col}_p_vals"] = p_val
return stats_df
#############################################
def collate_results(sess_data_stats_df, shuffle_dfs, analyspar, permpar):
"""
collate_results(sess_data_stats_df, shuffle_dfs, analyspar, permpar)
Return results collated from real data and shuffled data dataframes,
with statistics, null distributions and p-values added.
Required args:
- sess_data_stats_df (pd.DataFrame):
dataframe where each row contains statistics for a session,
and where columns include data descriptors, and logistic regression
scores for different data subsets
(e.g. "train", "val", "test").
- shuffle_dfs (list):
dataframes for each session, where each row contains data for
different data shuffles, and each column contains data to use to
construct null distributions.
- analyspar (AnalysPar):
named tuple containing analysis parameters
- permpar (PermPar):
named tuple containing permutation parameters
Returns:
- stats_df (pd.DataFrame):
dataframe with real data statistics, shuffled data confidence
intervals and p-values for test set data.
"""
# check shuffle_dfs
shuffle_df_lengths = [len(shuffle_df) for shuffle_df in shuffle_dfs]
if len(np.unique(shuffle_df_lengths)) != 1:
raise ValueError("All shuffle_dfs must have the same length.")
stat_cols = [
col for col in shuffle_dfs[0].columns
if col.split("_")[0] in ["train", "val", "test"]
]
main_cols = [
col for col in shuffle_dfs[0].columns
if not (col in stat_cols + ["shuffle"])
]
# take statistics across session scores
data_stats_df = pd.DataFrame()
for stat_col in stat_cols:
data_stats_df[stat_col] = sess_data_stats_df[f"{stat_col}_stat"]
data_stats_df = get_df_stats(data_stats_df, analyspar)
# take statistics across session shuffles at the same index
shuffle_dfs_concat = pd.concat(shuffle_dfs)
stat_shuffle_dfs = shuffle_dfs_concat.loc[:, stat_cols]
by_row_index = stat_shuffle_dfs.groupby(stat_shuffle_dfs.index)
if analyspar.stats == "mean":
shuffle_df = by_row_index.mean()
elif analyspar.stats == "median":
shuffle_df = by_row_index.median()
else:
gen_util.accepted_values_error(
"analyspar.stats", analyspar.stats, ["mean", "median"]
)
temp_stats_df = add_CI_p_vals(shuffle_df, data_stats_df, permpar)
# add in main data columns
stats_df = pd.DataFrame(columns=main_cols + temp_stats_df.columns.tolist())
sort_order = np.argsort(sess_data_stats_df["sessids"].tolist())
for col in main_cols:
data_df_values = sess_data_stats_df[col].unique().tolist()
shuffle_df_values = shuffle_dfs_concat[col].unique().tolist()
if data_df_values != shuffle_df_values:
raise ValueError(
"Expected data_df and shuffle_df non-statistic columns, "
"except shuffle, to contain the same sets of values."
)
# sort by session ID
values = sess_data_stats_df[col].tolist()
stats_df.at[0, col] = values = [values[v] for v in sort_order]
for col in temp_stats_df.columns:
stats_df.at[0, col] = temp_stats_df.loc[0, col]
return stats_df
#############################################
def run_sess_logreg(sess, analyspar, stimpar, logregpar, n_splits=100,
n_shuff_splits=300, seed=None, parallel=False):
"""
run_sess_logreg(sess, analyspar, stimpar, logregpar)
Runs logistic regressions on a session (real data and shuffled), and
returns statistics dataframes.
Required args:
- sess (Session):
Session object
- analyspar (AnalysPar):
named tuple containing analysis parameters
- stimpar (StimPar):
named tuple containing stimulus parameters
- logregpar (LogRegPar):
named tuple containing logistic regression parameters
Optional args:
- n_splits (int):
number of data splits to run logistic regressions on
default: 100
- n_shuff_splits (int):
number of shuffled data splits to run logistic regressions on
default: 300
- seed (int):
seed value to use. (-1 treated as None)
default: None
- parallel (bool):
if True, some of the analysis is run in parallel across CPU cores
default: False
Returns:
- data_stats_df (pd.DataFrame):
dataframe with only one data row containing data stats for each
score and data subset.
- shuffle_df (pd.DataFrame):
dataframe where each row contains data for different data
shuffles, and each column contains data for each score and data
subset.
"""
seed = rand_util.seed_all(seed, log_seed=False, seed_now=False)
# retrieve data
input_data, target_data, ctrl_ns = get_decoding_data(
sess, analyspar, stimpar, comp=logregpar.comp, ctrl=logregpar.ctrl)
scores_df = misc_analys.get_check_sess_df([sess], None, analyspar)
common_columns = scores_df.columns.tolist()
logreg_columns = ["comp", "ctrl", "bal", "shuffle"]
# do checks
if logregpar.q1v4 or logregpar.exp_v_unexp:
raise NotImplementedError("q1v4 and exp_v_unexp are not implemented.")
if n_splits <= 0 or n_shuff_splits <= 0:
raise ValueError("n_splits and n_shuff_splits must be greater than 0.")
set_types = ["train", "test"]
score_types = ["neg_log_loss", "accuracy", "balanced_accuracy"]
set_score_types = list(itertools.product(set_types, score_types))
extrapar = dict()
for shuffle in [False, True]:
n_runs = n_shuff_splits if shuffle else n_splits
extrapar["shuffle"] = shuffle
temp_dfs = []
for b, n in enumerate(range(0, n_runs, MAX_SIMULT_RUNS)):
extrapar["n_runs"] = int( | np.min([MAX_SIMULT_RUNS, n_runs - n]) | numpy.min |
import numpy as np
from numpy import linalg as LA
import scipy.sparse as sparse
from scipy.sparse import csc_matrix
from scipy.sparse import dia_matrix
import itertools
import operator
"""
A few functions used in PDE-FIND
<NAME>. 2016
"""
##################################################################################
##################################################################################
#
# Functions for taking derivatives.
# When in doubt / nice data ===> finite differences
# \ noisy data ===> polynomials
#
##################################################################################
##################################################################################
def TikhonovDiff(f, dx, lam, d = 1):
"""
Tikhonov differentiation.
return argmin_g \|Ag-f\|_2^2 + lam*\|Dg\|_2^2
where A is trapezoidal integration and D is finite differences for the first derivative
It looks like it will work well and does for the ODE case but
tends to introduce too much bias to work well for PDEs. If the data is noisy, try using
polynomials instead.
"""
# Initialize a few things
n = len(f)
f = np.matrix(f - f[0]).reshape((n,1))
# Get a trapezoidal approximation to an integral
A = np.zeros((n,n))
for i in range(1, n):
A[i,i] = dx/2
A[i,0] = dx/2
for j in range(1,i): A[i,j] = dx
e = np.ones(n-1)
D = sparse.diags([e, -e], [1, 0], shape=(n-1, n)).todense() / dx
# Invert to find derivative
g = np.squeeze(np.asarray(np.linalg.lstsq(A.T.dot(A) + lam*D.T.dot(D),A.T.dot(f))[0]))
if d == 1: return g
# If looking for a higher order derivative, this one should be smooth so now we can use finite differences
else: return FiniteDiff(g, dx, d-1)
def FiniteDiff(u, dx, d):
"""
Takes dth derivative data using 2nd order finite difference method (up to d=3)
Works but with poor accuracy for d > 3
Input:
u = data to be differentiated
dx = Grid spacing. Assumes uniform spacing
"""
n = u.size
ux = np.zeros(n, dtype=np.complex64)
if d == 1:
for i in range(1,n-1):
ux[i] = (u[i+1]-u[i-1]) / (2*dx)
ux[0] = (-3.0/2*u[0] + 2*u[1] - u[2]/2) / dx
ux[n-1] = (3.0/2*u[n-1] - 2*u[n-2] + u[n-3]/2) / dx
return ux
if d == 2:
for i in range(1,n-1):
ux[i] = (u[i+1]-2*u[i]+u[i-1]) / dx**2
ux[0] = (2*u[0] - 5*u[1] + 4*u[2] - u[3]) / dx**2
ux[n-1] = (2*u[n-1] - 5*u[n-2] + 4*u[n-3] - u[n-4]) / dx**2
return ux
if d == 3:
for i in range(2,n-2):
ux[i] = (u[i+2]/2-u[i+1]+u[i-1]-u[i-2]/2) / dx**3
ux[0] = (-2.5*u[0]+9*u[1]-12*u[2]+7*u[3]-1.5*u[4]) / dx**3
ux[1] = (-2.5*u[1]+9*u[2]-12*u[3]+7*u[4]-1.5*u[5]) / dx**3
ux[n-1] = (2.5*u[n-1]-9*u[n-2]+12*u[n-3]-7*u[n-4]+1.5*u[n-5]) / dx**3
ux[n-2] = (2.5*u[n-2]-9*u[n-3]+12*u[n-4]-7*u[n-5]+1.5*u[n-6]) / dx**3
return ux
if d > 3:
return FiniteDiff(FiniteDiff(u,dx,3), dx, d-3)
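# Hedged usage sketch (added for illustration; not part of the original file):
# differentiate a smooth sample signal with FiniteDiff and compare against the
# analytic derivative.
def _finite_diff_demo():
    x = np.linspace(0, 2 * np.pi, 201)
    u = np.sin(x)
    ux = FiniteDiff(u, x[1] - x[0], 1)
    # the truncation error should be small for this grid spacing
    return np.max(np.abs(ux.real - np.cos(x)))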
def ConvSmoother(x, p, sigma):
"""
Smoother for noisy data
Inputs: x, p, sigma
x = one dimensional series to be smoothed
p = width of smoother
sigma = standard deviation of gaussian smoothing kernel
"""
n = len(x)
y = np.zeros(n, dtype=np.complex64)
g = np.exp(-np.power(np.linspace(-p,p,2*p),2)/(2.0*sigma**2))
for i in range(n):
a = max([i-p,0])
b = min([i+p,n])
c = max([0, p-i])
d = min([2*p,p+n-i])
y[i] = np.sum(np.multiply(x[a:b], g[c:d]))/ | np.sum(g[c:d]) | numpy.sum |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
import sys
import os
from unittest.mock import patch
sys.path.append(os.path.abspath('..')) # current folder is ~/tests
import numpy as np
import pandas as pd
from scipy import sparse
import pytest
from pytest import approx
from idaes.apps.uncertainty_propagation.uncertainties import quantify_propagate_uncertainty, propagate_uncertainty,clean_variable_name
from pyomo.opt import SolverFactory
from pyomo.environ import *
import pyomo.contrib.parmest.parmest as parmest
ipopt_available = SolverFactory('ipopt').available()
kaug_available = SolverFactory('k_aug').available()
dotsens_available = SolverFactory('dot_sens').available()
@pytest.mark.skipif(not ipopt_available, reason="The 'ipopt' command is not available")
@pytest.mark.skipif(not kaug_available, reason="The 'k_aug' command is not available")
@pytest.mark.skipif(not dotsens_available, reason="The 'dot_sens' command is not available")
class TestUncertaintyPropagation:
@pytest.mark.unit
def test_quantify_propagate_uncertainty1(self):
'''
It tests the function quantify_propagate_uncertainty with rooney & biegler's model.
'''
from idaes.apps.uncertainty_propagation.examples.rooney_biegler import rooney_biegler_model,rooney_biegler_model_opt
variable_name = ['asymptote', 'rate_constant']
data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0],
[4,16.0],[5,15.6],[7,19.8]],
columns=['hour', 'y'])
def SSE(model, data):
expr = sum((data.y[i] - model.response_function[data.hour[i]])**2 for i in data.index)
return expr
results = quantify_propagate_uncertainty(rooney_biegler_model,rooney_biegler_model_opt, data, variable_name, SSE)
assert results.obj == approx(4.331711213656886)
np.testing.assert_array_almost_equal(np.fromiter(results.theta.values(), dtype=float), [19.142575284617866, 0.53109137696521])
assert list(results.theta.keys()) == ['asymptote', 'rate_constant']
np.testing.assert_array_almost_equal(results.gradient_f, [0.99506259, 0.945148])
assert list(results.propagation_c) == []
np.testing.assert_array_almost_equal(results.dsdp.toarray(), [[1., 0.],[ 0., 1.]])
np.testing.assert_array_almost_equal(results.cov, np.array([[6.30579403, -0.4395341], [-0.4395341, 0.04193591]]))
assert results.propagation_f == pytest.approx(5.45439337747349)
@pytest.mark.component
def test_quantify_propagate_uncertainty2(self):
'''
This is the same test as test_quantify_propagate_uncertainty1,
but with the second argument of quantify_propagate_uncertainty as Pyomo Concrete Model.
'''
from idaes.apps.uncertainty_propagation.examples.rooney_biegler import rooney_biegler_model
variable_name = ['asymptote', 'rate_constant']
data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0],
[4,16.0],[5,15.6],[7,19.8]],
columns=['hour', 'y'])
def SSE(model, data):
expr = sum((data.y[i] - model.response_function[data.hour[i]])**2 for i in data.index)
return expr
model_uncertain= ConcreteModel()
model_uncertain.asymptote = Var(initialize = 15)
model_uncertain.rate_constant = Var(initialize = 0.5)
model_uncertain.obj = Objective(expr = model_uncertain.asymptote*( 1 - exp(-model_uncertain.rate_constant*10 ) ), sense=minimize)
results = quantify_propagate_uncertainty(rooney_biegler_model,model_uncertain, data, variable_name, SSE)
assert results.obj == approx(4.331711213656886)
np.testing.assert_array_almost_equal(np.fromiter(results.theta.values(), dtype=float), [19.142575284617866, 0.53109137696521])
assert list(results.theta.keys()) == ['asymptote', 'rate_constant']
np.testing.assert_array_almost_equal(results.gradient_f, [0.99506259, 0.945148])
assert list(results.propagation_c) == []
np.testing.assert_array_almost_equal(results.dsdp.toarray(), [[1., 0.],[ 0., 1.]])
np.testing.assert_array_almost_equal(results.cov, np.array([[6.30579403, -0.4395341], [-0.4395341, 0.04193591]]))
assert results.propagation_f == pytest.approx(5.45439337747349)
@pytest.mark.component
def test_propagate_uncertainty(self):
'''
It tests the function propagate_uncertainty with rooney & biegler's model.
'''
from idaes.apps.uncertainty_propagation.examples.rooney_biegler import rooney_biegler_model
variable_name = ['asymptote', 'rate_constant']
data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0],
[4,16.0],[5,15.6],[7,19.8]],
columns=['hour', 'y'])
def SSE(model, data):
expr = sum((data.y[i] - model.response_function[data.hour[i]])**2 for i in data.index)
return expr
parmest_class = parmest.Estimator(rooney_biegler_model, data,variable_name,SSE)
obj, theta, cov = parmest_class.theta_est(calc_cov=True)
model_uncertain= ConcreteModel()
model_uncertain.asymptote = Var(initialize = 15)
model_uncertain.rate_constant = Var(initialize = 0.5)
model_uncertain.obj = Objective(expr = model_uncertain.asymptote*( 1 - exp(-model_uncertain.rate_constant*10 ) ), sense=minimize)
propagate_results= propagate_uncertainty(model_uncertain, theta, cov, variable_name)
np.testing.assert_array_almost_equal(propagate_results.gradient_f, [0.9950625870024135,0.9451480001755206])
assert list(propagate_results.gradient_c) == []
np.testing.assert_array_almost_equal(propagate_results.dsdp.toarray(), [[1., 0.],[ 0., 1.]])
assert list(propagate_results.propagation_c) == []
assert propagate_results.propagation_f == pytest.approx(5.45439337747349)
@pytest.mark.component
def test_propagate_uncertainty1(self):
'''
It tests the function propagate_uncertainty with
min f: p1*x1+ p2*(x2^2) + p1*p2
s.t c1: x1 + x2 = p1
c2: x2 + x3 = p2
0 <= x1, x2, x3 <= 10
p1 = 10
p2 = 5
Variables = (x1, x2, x3)
Parameters (fixed variables) = (p1, p2)
'''
### Create optimization model
m = ConcreteModel()
m.dual = Suffix(direction=Suffix.IMPORT)
m.x1 = Var()
m.x2 = Var()
m.x3 = Var()
# Define parameters
m.p1 = Var(initialize=10)
m.p2 = Var(initialize=5)
m.p1.fix()
m.p2.fix()
# Define constraints
m.con1 = Constraint(expr=m.x1 + m.x2-m.p1==0)
m.con2 = Constraint(expr=m.x2 + m.x3-m.p2==0)
# Define objective
m.obj = Objective(expr=m.p1*m.x1+ m.p2*(m.x2**2) + m.p1*m.p2, sense=minimize)
### Solve optimization model
opt = SolverFactory('ipopt',tee=True)
opt.solve(m)
### Analytic solution
'''
At the optimal solution, none of the bounds are active. As long as the active set
does not change (i.e., none of the bounds become active), the
first order optimality conditions reduce to a simple linear system.
'''
# dual variables (multipliers)
v2_ = 0
v1_ = m.p1()
# primal variables
x2_ = (v1_ + v2_)/(2 * m.p2())
x1_ = m.p1() - x2_
x3_ = m.p2() - x2_
### Analytic sensitivity
'''
Using the analytic solution above, we can compute the sensitivies of x and v to
perturbations in p1 and p2.
The matrix dx_dp constains the sensitivities of x to perturbations in p
'''
# Initialize sensitivity matrix Nx x Np
# Rows: variables x
# Columns: parameters p
dx_dp = np.zeros((3,2))
# dx2/dp1 = 1/(2 * p2)
dx_dp[1, 0] = 1/(2*m.p2())
# dx2/dp2 = -(v1 + v2)/(2 * p2**2)
dx_dp[1,1] = -(v1_ + v2_)/(2 * m.p2()**2)
# dx1/dp1 = 1 - dx2/dp1
dx_dp[0, 0] = 1 - dx_dp[1,0]
# dx1/dp2 = 0 - dx2/dp2
dx_dp[0, 1] = 0 - dx_dp[1,1]
# dx3/dp1 = 1 - dx2/dp1
dx_dp[2, 0] = 0 - dx_dp[1,0]
# dx3/dp2 = 0 - dx2/dp2
dx_dp[2, 1] = 1 - dx_dp[1,1]
'''
Similarly, we can compute the gradients df_dx, df_dp
and Jacobians dc_dx, dc_dp
'''
# Initialize 1 x 3 array to store (\partial f)/(\partial x)
# Elements: variables x
df_dx = np.zeros(3)
# df/dx1 = p1
df_dx[0] = m.p1()
# df/dx2 = p2
df_dx[1] = 2 * m.p2() * x2_
# df/dx3 = 0
# Initialize 1 x 2 array to store (\partial f)/(\partial p)
# Elements: parameters p
df_dp = np.zeros(2)
# df/dxp1 = x1 + p2
df_dp[0] = x1_ + m.p2()
# df/dp2 = x2**2 + p1
df_dp[1] = x2_**2 + m.p1()
# Initialize 2 x 3 array to store (\partial c)/(\partial x)
# Rows: constraints c
# Columns: variables x
dc_dx = np.zeros((2,3))
# dc1/dx1 = 1
dc_dx[0,0] = 1
# dc1/dx2 = 1
dc_dx[0,1] = 1
# dc2/dx2 = 1
dc_dx[1,1] = 1
# dc2/dx3 = 1
dc_dx[1,2] = 1
# Remaining entries are 0
# Initialize 2 x 2 array to store (\partial c)/(\partial x)
# Rows: constraints c
# Columns: variables x
dc_dp = np.zeros((2,2))
# dc1/dp1 = -1
dc_dp[0,0] = -1
# dc2/dp2 = -1
dc_dp[1,1] = -1
### Uncertainty propagation
'''
Now lets test the uncertainty propagation package. We will assume p has covariance
sigma_p = [[2, 0], [0, 1]]
'''
## Prepare inputs
# Covariance matrix
sigma_p = np.array([[2, 0], [0, 1]])
# Nominal values for uncertain parameters
theta = {'p1':m.p1(), 'p2':m.p2()}
# Names of uncertain parameters
theta_names = ['p1','p2']
# Important to unfix the parameters!
# Otherwise k_aug will complain about too few degrees of freedom
m.p1.unfix()
m.p2.unfix()
## Run package
results = propagate_uncertainty(m, theta, sigma_p, theta_names)
## Check results
tmp_f = (df_dp + df_dx @ dx_dp)
sigma_f = tmp_f @ sigma_p @ tmp_f.transpose()
tmp_c = (dc_dp + dc_dx @ dx_dp)
sigma_c = tmp_c @ sigma_p @ tmp_c.transpose()
# This currently just checks if the order of the outputs did not change
# TODO: improve test robustness by using this information to set
# var_idx and theta_idx. This way the test will still work
# regardless of the order. In other words, the analytic solution needs to be
# reordered to match the variable/constraint order from
# this package. Alternately, the results could be converted into a Pandas dataframe
assert results.col == ['x1', 'x2', 'p1', 'p2', 'x3']
assert results.row == ['con1', 'con2', 'obj']
var_idx = np.array([True,True,False,False,True])
theta_idx = np.array([False,False,True,True,False])
# Check the gradient of the objective w.r.t. x matches
np.testing.assert_array_almost_equal(results.gradient_f[var_idx], np.array(df_dx))
# Check the gradient of the objective w.r.t. p (parameters) matches
np.testing.assert_array_almost_equal(results.gradient_f[theta_idx], np.array(df_dp))
# Check the Jacobian of the constraints w.r.t. x matches
np.testing.assert_array_almost_equal(results.gradient_c.toarray()[:, var_idx], np.array(dc_dx))
# Check the Jacobian of the constraints w.r.t. p (parameters) matches
np.testing.assert_array_almost_equal(results.gradient_c.toarray()[:, theta_idx], np.array(dc_dp))
# Check the NLP sensitivity results for the variables (x) matches
np.testing.assert_array_almost_equal(results.dsdp.toarray()[var_idx,:], | np.array(dx_dp) | numpy.array |
import numpy
import scipy.constants as codata
file_type = 1
tofloat = lambda s: | numpy.array(['0.0' if v == '' else v for v in s]) | numpy.array |
import numpy as np
def to_color(name):
if name == 'Non-Bitter':
return 'blue'
else:
return 'green'
def read_data():
""" read the data from whereever
remove """
all = np.genfromtxt('./data/bitter dataBase.csv', delimiter=',')
data = all[1:, 4:]
txt = | np.genfromtxt('./data/bitter dataBase.csv', delimiter=',', dtype=None) | numpy.genfromtxt |
import numpy as np
from .grid import csgrid_GMAO
def calc_cs_face_area(lon_b, lat_b, r_sphere = 6.375e6):
"""Calculate area of cubed-sphere grid cells on one face
Inputs must be in degrees. Edge arrays must be
shaped [N+1 x N+1]
"""
# Convert inputs to radians
lon_b_rad = lon_b * np.pi / 180.0
lat_b_rad = lat_b * np.pi / 180.0
r_sq = r_sphere * r_sphere
n_cs = lon_b.shape[1] - 1
# Allocate output array
cs_area = np.zeros((n_cs,n_cs))
# Ordering
valid_combo = np.array([[1,2,4],[2,3,1],[3,2,4],[4,1,3]]) - 1
for i_lon in range(n_cs):
for i_lat in range(n_cs):
lon_corner = np.zeros(4)
lat_corner = np.zeros(4)
xyz_corner = | np.zeros((4,3)) | numpy.zeros |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
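# Hedged usage sketch (added; not part of the generated module). Once the
# space_groups dictionary below has been populated, symmetry-equivalent
# reflections for a given hkl can be listed like this:
def _symmetry_equivalents_demo(hkl=(1, 2, 3)):
    sg = space_groups['P 1 21 1']
    hkls, phases = sg.symmetryEquivalentMillerIndices(N.array(hkl))
    return hkls, phases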
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
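# Space group 74 (I m m a) is the last orthorhombic group; the entries from
# No. 75 (P 4) onwards belong to the tetragonal system, so fourfold rotations
# about the z axis start to appear in the rotation parts below.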
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
#!/usr/bin/env python
# coding: utf-8
import sys, os
print( os.path.dirname( os.path.abspath('') ) )
sys.path.append( os.path.dirname( os.path.abspath('') ) )
# import numpy as np
# import torch
# import random
# from scipy.integrate import odeint, solve_ivp
# from sklearn.model_selection import train_test_split
# import torch_optimizer as optim_all
# from torch.optim.lr_scheduler import StepLR
# from dataclasses import dataclass
# import sys
# import os
# import tikzplotlib
# import pysindy as ps
# import polynomial_library_torch as pl_torch
# from utils import *
# from models import *
# from learning_models import *
# from scipy import signal
# torch.manual_seed(42)
# np.random.seed(seed=42)
import numpy as np
import torch
import os
from scipy.integrate import solve_ivp
from dataclasses import dataclass
import matplotlib.pyplot as plt
import Dictionary.polynomial_library_torch as pl_torch
from Functions.utils import printing_learned_rational_model, normalized_data
from Functions.modules import coeffs_dictionary_rational
from Functions.models import MM_Kinetics
from Functions.learning_models import learning_sparse_model_rational
from scipy import signal
from IPython.utils.io import Tee
from contextlib import closing
import tikzplotlib
torch.manual_seed(42)
np.random.seed(seed=42)
@dataclass
class parameters:
bs: int = 2
num_epochs: int = 3000
num_iter = 8
lr: float = 1e-3
save_model_path: str = './Results/MMkinectics/Noise/'
weightdecay: float =0e-3
NumInitial: int = 4
dim_x: int = 1
timefinal: float = 8.0
timestep: float = 5e-2
noiselevel: float = 0.02
normalize: bool = False
denoising: bool = True
tol_coeffs: float = 1e-2
poly_order = 4
tikz_save: bool = False
Params = parameters()
os.makedirs(os.path.dirname(Params.save_model_path), exist_ok=True)
dynModel = MM_Kinetics
ts = np.arange(0,Params.timefinal,Params.timestep)
# Initial condition and simulation time
x = np.zeros((Params.NumInitial,len(ts),Params.dim_x))
Ts = np.zeros((Params.NumInitial,len(ts),1))
xp0 = np.linspace(0.5,2,num = Params.NumInitial)
for i in range(Params.NumInitial):
# x0 = np.random.rand(Params.dim_x,)
x0 = np.array(xp0[i]).reshape(-1,)
sol = solve_ivp(lambda t, x: dynModel(x, t), [ts[0], ts[-1]], x0, t_eval=ts)
x[i] = np.transpose(sol.y)
Ts[i] = ts.reshape(-1,1)
x_original = x.copy()
fig = plt.figure(figsize=(4, 2.5))
ax = fig.add_subplot(1, 1, 1)
for i in range(Params.NumInitial):
ax.plot(Ts[i],x[i],'o', markersize=1)
ax.set(xlabel="time", ylabel="$s(t)$")
tikzplotlib.save(Params.save_model_path + "MMKinectics_InitialCond.tex")
plt.show()
fig.savefig(Params.save_model_path + "MMKinectics_InitialCond.pdf", bbox_inches = 'tight',pad_inches = 0)
####### Adding noise
x = x + Params.noiselevel * np.random.randn(*x.shape)
x_noise = x.copy()
x_denoise = np.zeros_like(x_noise)
for i in range(x.shape[0]):
x_denoise[i,:,0] = signal.savgol_filter(x_noise[i,:,0], 31,3)
fig = plt.figure(figsize=(4, 2.5))
ax = fig.add_subplot(111)
for i in range(x.shape[0]):
if i == 0:
ax.plot(ts,x_noise[i,:,0],'o',markersize=2)
ax.plot(ts,x_original[i,:,0],'k--', linewidth = 0.2)
else:
ax.plot(ts,x_noise[i,:,0],'o',markersize=2)
ax.plot(ts,x_original[i,:,0],'k--', linewidth = 0.2)
ax.set(xlabel = "time",ylabel="$s$")
if Params.tikz_save:
tikzplotlib.save(Params.save_model_path + "MM_Kinectics_noisedata.tex")
plt.show()
fig.savefig(Params.save_model_path + "MM_Kinectics_noisedata.pdf", bbox_inches = 'tight',pad_inches = 0)
fig = plt.figure(figsize=(4, 2.5))
ax = fig.add_subplot(111)
for i in range(x.shape[0]):
if i == 0:
ax.plot(ts,x_denoise[i,:,0])
else:
ax.plot(ts,x_denoise[i,:,0])
plt.gca().set_prop_cycle(None)
for i in range(x.shape[0]):
if i == 0:
ax.plot(ts,x_noise[i,:,0],'o',markersize=2,alpha= 0.25)
else:
ax.plot(ts,x_noise[i,:,0],'o',markersize=2,alpha= 0.25)
ax.set(xlabel = "time",ylabel="$s$")
ax.legend()
if Params.tikz_save:
tikzplotlib.save(Params.save_model_path + "MM_Kinectics_denoisedata.tex")
plt.show()
fig.savefig(Params.save_model_path + "MM_Kinectics_denoisedata.pdf", bbox_inches = 'tight',pad_inches = 0)
data_nor = normalized_data(x_denoise)
data_nor.mean, data_nor.std
print('='*50)
print('Mean: {}, Std: {}'.format(data_nor.mean, data_nor.std))
print('='*50)
# data_nor.mean = np.array([0.25])
# data_nor.std = np.array([0.1])
x_denoise = data_nor.normalize_meanstd()
# Define dataloaders
train_dset = list(zip(torch.tensor(x_denoise[:,10:-20,:]).float(),Ts[:,10:-20,:]))
train_dl = torch.utils.data.DataLoader(train_dset, batch_size = Params.bs)
dataloaders = {'train': train_dl}
funs_dictionary = pl_torch.PolynomialLibrary(degree = Params.poly_order)
funs_dictionary.fit(x[0])
funs_dictionary_size = funs_dictionary.transform(x[0]).shape[1]
funs_dictionary_size
Coeffs_rational = coeffs_dictionary_rational(funs_dictionary_size,Params.dim_x)
Coeffs_rational, loss_track = learning_sparse_model_rational(funs_dictionary, Coeffs_rational, dataloaders, Params,
lr_reduction = 1.1, quite = True)
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
for i in range(Params.num_iter):
ax.semilogy(loss_track[i], label = 'Number of zero terms: {}'.format(i))
ax.legend()
fig.show()
plot_kws = dict(linewidth=2)
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1, 1, 1)
for i in range(Params.num_iter):
ax.semilogy(loss_track[i], label = 'Number of zero terms: {}'.format(i))
ax.legend()
fig = plt.figure(figsize=(4, 2.5))
ax = fig.add_subplot(1, 1, 1)
for i in range(Params.num_iter):
ax.semilogy(i,loss_track[i,-1],'ko')
ax.semilogy(6,loss_track[6,-1],'go', markersize = 20,fillstyle='none')
ax.set(xlabel="Number of forced zero coefficients ", ylabel="Loss")
tikzplotlib.save(Params.save_model_path + "MMKinectics_noise_Pareto.tex")
plt.show()
fig.savefig(Params.save_model_path + "MMKinectics_noise_Pareto.pdf", bbox_inches = 'tight',pad_inches = 0)
####################
Coeffs_rational = torch.load(Params.save_model_path+'MM_model_coefficients_iter_{}.pkl'.format(6))
Learned_Coeffs_numerator = Coeffs_rational.numerator.weight.detach().clone().t().numpy()
Learned_Coeffs_denominator = Coeffs_rational.denominator.weight.detach().clone().t().numpy()
with closing(Tee(Params.save_model_path + "MM_Kinectics_learnedmodel.log", "a+", channel="stdout")) as outputstream:
# printing of the learned sparse models in a file
print('\n')
print('='*50)
print('RK4 Inspired Methods Sparse Identification')
printing_learned_rational_model(Learned_Coeffs_numerator, Learned_Coeffs_denominator, funs_dictionary.get_feature_names())
print('='*50)
# Adding one to denominator
Learned_Coeffs_denominator = np.concatenate((np.ones((1,1)),Learned_Coeffs_denominator),axis=0)
# Simulating models
fn = lambda z: (funs_dictionary.transform(np.expand_dims(z, axis=0))@Learned_Coeffs_numerator).reshape(-1,)
fd = lambda z: (funs_dictionary.transform(np.expand_dims(z, axis=0))@Learned_Coeffs_denominator).reshape(-1,)
f1 = lambda z: (fn(z)/fd(z))
learnt_deri = lambda z,t: np.array(f1(z))
x0 = np.array([2.])
x0_nor = (x0 - data_nor.mean)/data_nor.std
ts_refine = np.arange(0,Params.timefinal,1e-2)
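# --------------------------------------------------------------------------
# NOTE: the original script is truncated at this point. The lines below are
# an illustrative sketch (not the original code) of how the identified
# rational model could be simulated on ts_refine and mapped back to the
# original units with the normalization statistics computed above.
x_learned_nor = solve_ivp(lambda t, z: learnt_deri(z, t),
                          [ts_refine[0], ts_refine[-1]], x0_nor,
                          t_eval=ts_refine).y.T
x_learned = x_learned_nor * data_nor.std + data_nor.mean
fig = plt.figure(figsize=(4, 2.5))
ax = fig.add_subplot(1, 1, 1)
ax.plot(ts_refine, x_learned, label='learned model')
ax.set(xlabel="time", ylabel="$s(t)$")
ax.legend()
plt.show()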
import numpy as np
import numpy.linalg as la
import torch
import torch.nn.functional as F
import torchvision
import json
import time
from matplotlib import pyplot as plt
#from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm, trange
from lietorch import SE3, LieGroupParameter
from scipy.spatial.transform import Rotation as R
import cv2
from nerf import (get_ray_bundle, run_one_iter_of_nerf)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def mahalanobis(u, v, cov):
delta = u - v
m = torch.dot(delta, torch.matmul(torch.inverse(cov), delta))
return m
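# mahalanobis() returns the squared Mahalanobis distance
# m = (u - v)^T cov^{-1} (u - v); for cov = I it reduces to the squared
# Euclidean distance between u and v.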
rot_x = lambda phi: torch.tensor([
[1., 0., 0.],
[0., torch.cos(phi), -torch.sin(phi)],
[0., torch.sin(phi), torch.cos(phi)]], dtype=torch.float32)
rot_x_np = lambda phi: np.array([
[1., 0., 0.],
[0., np.cos(phi), -np.sin(phi)],
[0., np.sin(phi), np.cos(phi)]], dtype=np.float32)
rot_psi = lambda phi: np.array([
[1, 0, 0, 0],
[0, np.cos(phi), -np.sin(phi), 0],
[0, np.sin(phi), np.cos(phi), 0],
[0, 0, 0, 1]])
rot_theta = lambda th: np.array([
[np.cos(th), 0, -np.sin(th), 0],
[0, 1, 0, 0],
[np.sin(th), 0, np.cos(th), 0],
[0, 0, 0, 1]])
rot_phi = lambda psi: np.array([
[np.cos(psi), -np.sin(psi), 0, 0],
[np.sin(psi), np.cos(psi), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
trans_t = lambda t: np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, t],
[0, 0, 0, 1]])
def SE3_to_trans_and_quat(data):
rot = data[:3, :3]
trans = data[:3, 3]
r = R.from_matrix(rot)
quat = r.as_quat()
return np.concatenate([trans, quat])
def find_POI(img_rgb, DEBUG=False): # img - RGB image in range 0...255
img = np.copy(img_rgb)
#img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
#sift = cv2.SIFT_create()
#keypoints = sift.detect(img, None)
# Initiate ORB detector
orb = cv2.ORB_create()
# find the keypoints with ORB
keypoints2 = orb.detect(img,None)
#if DEBUG:
# img = cv2.drawKeypoints(img_gray, keypoints, img)
#keypoints = keypoints + keypoints2
keypoints = keypoints2
xy = [keypoint.pt for keypoint in keypoints]
xy = np.array(xy).astype(int)
# Remove duplicate points
xy_set = set(tuple(point) for point in xy)
xy = np.array([list(point) for point in xy_set]).astype(int)
return xy # pixel coordinates
def nearestPD(A):
"""Find the nearest positive-definite matrix to input
    A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1], which
credits [2].
[1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
    [2] N. J. Higham, "Computing a nearest symmetric positive semidefinite
matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
"""
B = (A + A.T) / 2
_, s, V = la.svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3 = (A2 + A2.T) / 2
if isPD(A3):
return A3
spacing = np.spacing(la.norm(A))
# The above is different from [1]. It appears that MATLAB's `chol` Cholesky
    # decomposition will accept matrices with exactly 0-eigenvalue, whereas
# Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab
# for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
# will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
# the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
    # `spacing` will, for Gaussian random matrices of small dimension, be on
    # the order of 1e-16. In practice, both ways converge, as the unit test
# below suggests.
I = np.eye(A.shape[0])
k = 1
while not isPD(A3):
mineig = np.min(np.real(la.eigvals(A3)))
A3 += I * (-mineig * k**2 + spacing)
k += 1
return A3
def isPD(B):
"""Returns true when input is positive-definite, via Cholesky"""
try:
_ = la.cholesky(B)
return True
except la.LinAlgError:
return False
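# Illustrative sketch (not part of the original file): expected behaviour of
# nearestPD()/isPD() on a symmetric but indefinite matrix. Wrapped in a helper
# function so that importing this module stays side-effect free.
def _nearest_pd_example():
    A = np.array([[1., 2.], [2., 1.]])  # eigenvalues 3 and -1, so not PD
    assert not isPD(A)
    A_pd = nearestPD(A)                 # nearest (approximately) positive-definite matrix
    assert isPD(A_pd)
    return A_pd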
class Estimator():
def __init__(self, filter_cfg, agent, start_state, filter=True) -> None:
# Parameters
self.batch_size = filter_cfg['batch_size']
self.kernel_size = filter_cfg['kernel_size']
self.dil_iter = filter_cfg['dil_iter']
self.lrate = filter_cfg['lrate']
self.sampling_strategy = filter_cfg['sampling_strategy']
self.reject_thresh = filter_cfg['reject_thresh']
self.agent = agent
self.is_filter = filter
#State initial estimate at time t=0
self.xt = start_state #Size 18
self.sig = 1e-1*torch.eye(start_state.shape[0])
self.Q = 1e-1*torch.eye(start_state.shape[0])
#self.sig = filter_cfg['sig0'] #State covariance 18x18
#self.Q = filter_cfg['Q'] #Process noise covariance
self.R = filter_cfg['R'] #Measurement covariance
self.iter = filter_cfg['N_iter']
#NERF SPECIFIC CONFIGS
# create meshgrid from the observed image
self.W, self.H, self.focal = filter_cfg['W'], filter_cfg['H'], filter_cfg['focal']
#self.coords = np.asarray(np.stack(np.meshgrid(np.linspace(0, self.W - 1, self.W), np.linspace(0, self.H - 1, self.H)), -1),
# dtype=int)
#Storage for plots
self.pixel_losses = {}
self.dyn_losses = {}
self.covariance = []
self.state_estimates = []
self.states = {}
self.predicted_states = []
self.actions = []
self.iteration = 0
def estimate_relative_pose(self, sensor_image, start_state, sig, obs_img_pose=None, obs_img=None, model_coarse=None, model_fine=None,cfg=None,
encode_position_fn=None, encode_direction_fn=None):
b_print_comparison_metrics = obs_img_pose is not None
b_generate_overlaid_images = b_print_comparison_metrics and obs_img is not None
obs_img_noised = sensor_image
W_obs = sensor_image.shape[0]
H_obs = sensor_image.shape[1]
# find points of interest of the observed image
POI = find_POI(obs_img_noised, False) # xy pixel coordinates of points of interest (N x 2)
### IF FEATURE DETECTION CANT FIND POINTS, RETURN INITIAL
if len(POI.shape) == 1:
self.pixel_losses[f'{self.iteration}'] = []
self.dyn_losses[f'{self.iteration}'] = []
self.states[f'{self.iteration}'] = []
return start_state.clone().detach(), False
obs_img_noised = (np.array(obs_img_noised) / 255.).astype(np.float32)
obs_img_noised = torch.tensor(obs_img_noised).cuda()
#sensor_image[POI[:, 1], POI[:, 0]] = [0, 255, 0]
# create meshgrid from the observed image
coords = np.asarray(np.stack(np.meshgrid(np.linspace(0, W_obs - 1, W_obs), np.linspace(0, H_obs - 1, H_obs)), -1), dtype=int)
# create sampling mask for interest region sampling strategy
interest_regions = np.zeros((H_obs, W_obs, ), dtype=np.uint8)
interest_regions[POI[:,1], POI[:,0]] = 1
I = self.dil_iter
interest_regions = cv2.dilate(interest_regions, np.ones((self.kernel_size, self.kernel_size), np.uint8), iterations=I)
        interest_regions = np.array(interest_regions, dtype=bool)
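        # NOTE: the remainder of this method is truncated in this copy of the
        # file. Broadly (a sketch, not the original code), the next steps
        # would sample self.batch_size pixels from interest_regions, build the
        # corresponding rays with get_ray_bundle(), render them with
        # run_one_iter_of_nerf(), and minimise the photometric error between
        # rendered and observed pixels with respect to the camera pose
        # (e.g. parameterised through lietorch's SE3 / LieGroupParameter).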
import numpy as np
def compute_forward(t, const_forward, forward_tol, track, centre_point,
direction, f, func_args):
"""
Repeatedly multiply step size t by const_forward (where const_forward > 1)
until either forward_tol is met or until the function value cannot be
improved any further.
Parameters
----------
t : float
Initial guess of step size.
const_forward : float
The initial guess of the
step size will be multiplied by const_forward at each
iteration of forward tracking. That is,
        t <- t * const_forward
It should be noted that const_forward > 1.
forward_tol : float
It must be ensured that the step size computed by forward
tracking is not greater than forward_tol. If this is the
case, iterations of forward tracking are terminated.
track : 2-D array
Array containing the step sizes attempted along with the
        corresponding response function value.
centre_point : 1-D array
Apply local search to centre_point.
direction : 1-D array
Search direction used for local search.
f : function
response function.
`f(point, *func_args) -> float`
where point is a 1-D array with shape(d, ) and func_args is
a tuple of arguments needed to compute the response function value.
func_args : tuple
Arguments passed to the function f.
Returns
-------
track : 2-D array
Updated array containing the step sizes attempted along with the
corresponding response function value.
count_func_evals : integer
Total number of response function evaluations.
flag : boolean
        flag=True if the step size stayed within forward_tol. flag=False if
        the step size exceeded forward_tol and forward tracking was
        terminated.
"""
count_func_evals = 0
while track[-2][1] > track[-1][1]:
t = t * const_forward
if t > forward_tol:
return track, count_func_evals, False
track = np.vstack((track,
np.array([t, f(np.copy(centre_point) -
t * direction, *func_args)])))
count_func_evals += 1
return track, count_func_evals, True
def forward_tracking(centre_point, t, f_old, f_new, direction, const_forward,
forward_tol, f, func_args):
"""
    First part of forward_tracking() obtains a step size from
    compute_forward(). Second part of forward_tracking() checks whether flag
    is False, that is, whether forward_tol was exceeded within
    compute_forward(). If flag is False, outputs are returned. Otherwise, if
    flag is True, it is checked whether the response function can be improved
    further by applying the two-in-a-row rule. If the response function can
    be improved, the last entry in track (i.e. the step size and the
    corresponding response function value, which is larger than at the
    previous iteration) is replaced with
    [t, f(centre_point - t * direction, *func_args)], and compute_forward()
    is applied again.
    If the response function cannot be improved with the two-in-a-row rule,
    outputs are returned.
Parameters
----------
centre_point : 1-D array
Apply local search to centre_point.
t : float
Initial guess of step size.
f_old : float
Function value at f(centre_point, *func_args).
f_new : float
Function value at f(centre_point - t * direction, *func_args).
direction : 1-D array
Search direction used for local search.
const_forward : float
The initial guess of the
step size will be multiplied by const_forward at each
iteration of forward tracking. That is,
        t <- t * const_forward
It should be noted that const_forward > 1.
forward_tol : float
It must be ensured that the step size computed by forward
tracking is not greater than forward_tol. If this is the
case, iterations of forward tracking are terminated.
f : function
response function.
`f(point, *func_args) -> float`
where point is a 1-D array with shape(d, ) and func_args is
a tuple of arguments needed to compute the response function value.
func_args : tuple
Arguments passed to the function f.
Returns
-------
track : 2-D array
Updated array containing the step sizes attempted along with the
corresponding response function value.
count_func_evals : integer
Total number of response function evaluations.
flag : boolean
        flag=True if the step size stayed within forward_tol. flag=False if
        the step size exceeded forward_tol and forward tracking was
        terminated.
"""
assert(const_forward > 1)
track = np.array([[0, f_old], [t, f_new]])
t = t * const_forward
track = np.vstack((track,
np.array([t, f(np.copy(centre_point) -
t * direction, *func_args)])))
total_func_evals = 1
track, count_func_evals, flag = (compute_forward
(t, const_forward, forward_tol,
track, centre_point, direction,
f, func_args))
total_func_evals += count_func_evals
if flag == False:
return track, total_func_evals, flag
while flag:
t = np.copy(track[-1][0]) * const_forward
f_new = f(np.copy(centre_point) - t * direction, *func_args)
total_func_evals += 1
if f_new < track[-2][1]:
track[-1] = np.array([t, f_new])
track, count_func_evals, flag = compute_forward(t, const_forward,
forward_tol, track,
centre_point,
direction,
f, func_args)
total_func_evals += count_func_evals
else:
return track, total_func_evals, flag
return track, total_func_evals, flag
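def _forward_tracking_example():
    """
    Illustrative usage sketch (not part of the original module): forward
    tracking along a descent direction of a simple quadratic response
    function. The step size, constants and tolerance used here are
    assumptions made purely for the example.
    """
    f = lambda point: float(np.sum(point ** 2))
    centre_point = np.array([4.0, 4.0])
    direction = np.array([1.0, 1.0])         # search moves to centre_point - t * direction
    t = 1.0
    f_old = f(centre_point)                  # 32.0
    f_new = f(centre_point - t * direction)  # 18.0 < f_old, so track forward
    track, n_evals, flag = forward_tracking(centre_point, t, f_old, f_new,
                                            direction, const_forward=2,
                                            forward_tol=100, f=f, func_args=())
    return track, n_evals, flag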
def compute_backward(t, const_back, back_tol, track, centre_point, direction,
f, func_args):
"""
Decreases step size by a multiple of const_back (less than one) until
either back_tol is met or until the response function cannot be improved
any further.
Parameters
----------
t : float
Initial guess of step size.
const_back : float
If backward tracking is required, the initial guess of the
step size will be multiplied by const_back at each iteration
of backward tracking. That is,
t <- t * const_back
It should be noted that const_back < 1.
back_tol : float
It must be ensured that the step size computed by backward
tracking is not smaller than back_tol. If this is the case,
iterations of backward tracking are terminated. Typically,
back_tol is a very small number.
track : 2-D array
Array containing the step sizes attempted along with the
        corresponding response function value.
centre_point : 1-D array
Apply local search to centre_point.
direction : 1-D array
Search direction used for local search.
f : function
response function.
`f(point, *func_args) -> float`
        where point is a 1-D array with shape(d, ) and func_args is
a tuple of arguments needed to compute the response function value.
func_args : tuple
Arguments passed to the function f.
Returns
-------
track : 2-D array
Updated array containing the step sizes attempted along with the
corresponding response function value.
count_func_evals : integer
Total number of response function evaluations.
flag : boolean
        flag=True if the step size stayed above back_tol. flag=False if the
        step size fell below back_tol and backward tracking was terminated.
"""
count_func_evals = 0
while track[-2][1] > track[-1][1]:
t = t * const_back
if t < back_tol:
return track, count_func_evals, False
track = np.vstack((track,
np.array([t, f(np.copy(centre_point) -
t * direction, *func_args)])))
count_func_evals += 1
return track, count_func_evals, True
def backward_tracking(centre_point, t, f_old, f_new, direction, const_back,
back_tol, f, func_args):
"""
Decreases step size until the response function value at some step size
t is less than the response function value at the centre_point. The step
size is decreased in order to find the best response function value
possible. The two-in-a-row rule is used as the stopping criteria for the
step size.
Parameters
----------
centre_point : 1-D array
Apply local search to centre_point.
t : float
Initial guess of step size.
f_old : float
Function value at f(centre_point, *func_args).
f_new : float
Function value at f(centre_point - t * direction, *func_args).
direction : 1-D array
Search direction used for local search.
const_back : float
If backward tracking is required, the initial guess of the
step size will be multiplied by const_back at each iteration
of backward tracking. That is,
t <- t * const_back
It should be noted that const_back < 1.
back_tol : float
It must be ensured that the step size computed by backward
tracking is not smaller than back_tol. If this is the case,
iterations of backward tracking are terminated. Typically,
back_tol is a very small number.
f : function
response function.
`f(point, *func_args) -> float`
where point is a 1-D array with shape(d, ) and func_args is
a tuple of arguments needed to compute the response function value.
func_args : tuple
Arguments passed to the function f.
Returns
-------
track : 2-D array
Updated array containing the step sizes attempted along with the
corresponding response function value.
count_func_evals : integer
Total number of response function evaluations.
"""
assert(const_back < 1)
total_func_evals = 0
track = np.array([[0, f_old], [t, f_new]])
temp_track = np.copy(track)
while track[0][1] <= track[-1][1]:
t = t * const_back
if t < back_tol:
return temp_track, total_func_evals
else:
track = np.vstack((track,
np.array([t, f(np.copy(centre_point) -
t * direction, *func_args)])))
total_func_evals += 1
track, count_func_evals, flag = compute_backward(t, const_back, back_tol,
track, centre_point,
direction, f, func_args)
total_func_evals += count_func_evals
if flag == False:
return track, total_func_evals
while flag:
t = np.copy(track[-1][0]) * const_back
f_new = f(np.copy(centre_point) - t * direction, *func_args)
total_func_evals += 1
if f_new < track[-2][1]:
track[-1] = np.array([t, f_new])
(track,
count_func_evals,
flag) = compute_backward(t, const_back, back_tol, track,
centre_point, direction, f, func_args)
total_func_evals += count_func_evals
else:
return track, total_func_evals
return track, total_func_evals
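def _backward_tracking_example():
    """
    Illustrative usage sketch (not part of the original module): backward
    tracking when the initial step size overshoots the minimiser of a simple
    quadratic response function. The numbers used here are assumptions made
    purely for the example.
    """
    f = lambda point: float(np.sum(point ** 2))
    centre_point = np.array([1.0, 1.0])
    direction = np.array([1.0, 1.0])
    t = 4.0
    f_old = f(centre_point)                  # 2.0
    f_new = f(centre_point - t * direction)  # 18.0 > f_old, so track backward
    track, n_evals = backward_tracking(centre_point, t, f_old, f_new,
                                       direction, const_back=0.5,
                                       back_tol=1e-8, f=f, func_args=())
    return track, n_evals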
def compute_coeffs(track_y, track_t):
"""
Minimizes the fitted quadratic model of the observed step sizes
and corresponding response function values.
Parameters:
-----------
track_y : 1-D array
Array containing response function values at each step size.
track_t : 1-D array
Array containing the tested step sizes.
Returns:
--------
coeffs : float
        The point at which the quadratic model is minimized.
"""
design_matrix_step = np.vstack((np.repeat(track_y[0], len(track_y)),
np.array(track_t),
                                    np.array(track_t) ** 2)).T
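    # NOTE: the rest of this function is truncated in this copy of the file,
    # and the squared column above is reconstructed from the "quadratic model"
    # described in the docstring. A minimal sketch of the remaining steps,
    # assuming an ordinary least-squares fit whose vertex gives the minimiser
    # (this is a reconstruction, not the original code):
    coeffs = np.linalg.lstsq(design_matrix_step, np.array(track_y),
                             rcond=None)[0]
    return -coeffs[1] / (2 * coeffs[2])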
# Runtime on 2.3GHz MacBook Pro with 8 Gb of RAM: ~10 minutes
# Requires: a file with limb darkening fit information that includes the intensity values
# Outputs: a .npy file with locations and values of the lowest I(mu) / I_1
# and the lowest I'(mu) / I_1 for every (T, g, lambda)
# Notes: to obtain the required file, run calc_limbdark with the -s option
import numpy as np
import pickle
import pa.lib.limbdark as limbdark
import pa.lib.fit as ft
import matplotlib.pyplot as plt
from matplotlib import rc
# real roots of a polynomial within bounds
# input: coefficients, lower bound, upper bound
def rts(a, b1, b2):
r = np.roots(a)
r = np.real(r[ np.isreal(r) ])
r = r[ (b1 <= r) & (r <= b2) ]
return r
# minimum of a polynomial: (location, value)
# inputs: polynomial's coefficients, locations of local suprema and boundary points
def minim(a, s, b1, b2):
pts = np.concatenate( (np.array([b1, b2]), s) )
val = np.polyval(a, pts)
i = np.argmin(val)
return pts[i], val[i]
iodir = '../../' # location of the data directory
# unpickle the limb darkening information
with open(iodir + 'data/limbdark_m01.pkl', 'rb') as f:
ld = pickle.load(f)
wl = ld.lam # 1221 wavelength
g = ld.g # 11 gravity
T = ld.T[0:1] # 61 temperature
bounds = ld.bounds
I = ld.I[..., 0:1] # (1221, 17, 11, 61) = (wavelength, mu, gravity, temperature)
a = ld.fit_params[0:1, ...] # (61, 11, 1221, 15) = (temperature, gravity, wavelength, parameter index)
sh = a.shape
# (temperature, gravity, wavelength, 4 values)
# Last dimension gives [smallest-value-mu, smallest-value, smallest_der_mu, smallest-der]
Imin = np.full( (sh[0], sh[1], sh[2], 4), np.nan )
# set the mu partition in the Fit class
ft.set_muB(bounds)
# create a bounds array that includes 0 and 1
bds = np.concatenate( (np.array([0]), bounds, np.array([1])) )
# (61, 11, 1221, 3, 5) = (temperature, gravity, wavelength, interval, function)
a = a.reshape( (sh[0], sh[1], sh[2], ft.m, ft.n) )
# permute the dimensions of the stored intensity array to (temperature, gravity, wavelength, mu)
I = np.transpose(I, axes=[3, 2, 0, 1])
print('Computing minima of intensity fits and their derivatives')
# check if any of the fits are negative or have negative derivatives
for iT in range(sh[0]):
print('T = ' + str(T[iT]))
for ig in range(sh[1]):
for iw in range(sh[2]):
I1 = I[iT, ig, iw,-1] # intensity at mu = 1 from the grid
Im = [np.nan, np.inf] # location and value of minimum I
Ipm = [np.nan, np.inf] # location and value of minimum I prime
for ii in range(ft.m):
# extract the coefficients on this interval
aa = a[iT, ig, iw, ii]
                if ~np.isnan(aa[0]):
import numpy as np
import pygame
import shapely.geometry
from fluids.utils import rotation_array
class Shape(object):
def __init__(self, x=0, y=0,
xdim=0, ydim=0,
points=[],
mass=0,
type=None,
angle=0, angle_deg=0,
color=(255, 255, 255),
border_color=(0xE4, 0xE4, 0xE4),
vis_level=1,
state=None,
collideables=[],
waypoints=None):
if angle_deg:
angle = np.deg2rad(angle_deg)
if not len(points):
corner_offsets = np.array([xdim / 2.0, ydim / 2.0])
            centers = np.array([x, y])
import cv2, argparse
import numpy as np
def makeCartoon(original):
    # Make a copy of the original image to work with
img = np.copy(original)
# Convert image to grayscale
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Apply gaussian filter to the grayscale image
imgGray = cv2.GaussianBlur(imgGray, (3,3), 0)
# Detect edges in the image and threshold it
edges = cv2.Laplacian(imgGray, cv2.CV_8U, ksize=5)
edges = 255 - edges
ret, edgeMask = cv2.threshold(edges, 150, 255, cv2.THRESH_BINARY)
# Apply Edge preserving filter to get the heavily blurred image
imgBilateral = cv2.edgePreservingFilter(img, flags=2, sigma_s=50, sigma_r=0.4)
    # Create an output matrix
output = np.zeros(imgGray.shape)
# Combine the cartoon and edges
output = cv2.bitwise_and(imgBilateral, imgBilateral, mask=edgeMask)
return output
def clarendon(original):
img = np.copy(original)
# Separate the channels
bChannel = img[:,:,0]
gChannel = img[:,:,1]
rChannel = img[:,:,2]
# Specifying the x-axis for mapping
xValues = np.array([0, 28, 56, 85, 113, 141, 170, 198, 227, 255])
# Specifying the y-axis for different channels
rCurve = np.array([0, 16, 35, 64, 117, 163, 200, 222, 237, 249 ])
gCurve = np.array([0, 24, 49, 98, 141, 174, 201, 223, 239, 255 ])
bCurve = np.array([0, 38, 66, 104, 139, 175, 206, 226, 245, 255 ])
# Creating the LUT to store the interpolated mapping
fullRange = np.arange(0,256)
bLUT = np.interp(fullRange, xValues, bCurve )
gLUT = np.interp(fullRange, xValues, gCurve )
rLUT = np.interp(fullRange, xValues, rCurve )
# Applying the mapping to the image using LUT
bChannel = cv2.LUT(bChannel, bLUT)
gChannel = cv2.LUT(gChannel, gLUT)
rChannel = cv2.LUT(rChannel, rLUT)
# Converting back to uint8
img[:,:,0] = np.uint8(bChannel)
img[:,:,1] = np.uint8(gChannel)
img[:,:,2] = np.uint8(rChannel)
return img
def adjustSaturation(original, saturationScale = 1.0):
img = np.copy(original)
# Convert to HSV color space
hsvImage = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
# Convert to float32
hsvImage = np.float32(hsvImage)
# Split the channels
H, S, V = cv2.split(hsvImage)
# Multiply S channel by scaling factor
S = np.clip(S * saturationScale , 0, 255)
# Merge the channels and show the output
hsvImage = np.uint8( cv2.merge([H, S, V]) )
imSat = cv2.cvtColor(hsvImage, cv2.COLOR_HSV2BGR)
return imSat
def moon(original):
img = np.copy(original)
# Specifying the x-axis for mapping
origin = np.array([0, 15, 30, 50, 70, 90, 120, 160, 180, 210, 255 ])
# Specifying the y-axis for mapping
Curve = np.array([0, 0, 5, 15, 60, 110, 150, 190, 210, 230, 255 ])
# Creating the LUT to store the interpolated mapping
fullRange = np.arange(0,256)
LUT = np.interp(fullRange, origin, Curve )
# Applying the mapping to the L channel of the LAB color space
labImage = cv2.cvtColor(img,cv2.COLOR_BGR2LAB)
labImage[:,:,0] = cv2.LUT(labImage[:,:,0], LUT)
img = cv2.cvtColor(labImage,cv2.COLOR_LAB2BGR)
# Desaturating the image
img = adjustSaturation(img,0.01)
return img
def adjustContrast(original, scaleFactor):
img = np.copy(original)
# Convert to YCrCb color space
ycbImage = cv2.cvtColor(img,cv2.COLOR_BGR2YCrCb)
# Convert to float32 since we will be doing multiplication operation
ycbImage = np.float32(ycbImage)
# Split the channels
Ychannel, Cr, Cb = cv2.split(ycbImage)
# Scale the Ychannel
Ychannel = np.clip(Ychannel * scaleFactor , 0, 255)
# Merge the channels and show the output
ycbImage = np.uint8( cv2.merge([Ychannel, Cr, Cb]) )
img = cv2.cvtColor(ycbImage, cv2.COLOR_YCrCb2BGR)
return img
def applyVignette(original, vignetteScale):
img = np.copy(original)
# convert to float
img = np.float32(img)
rows,cols = img.shape[:2]
# Compute the kernel size from the image dimensions
k = np.min(img.shape[:2])/vignetteScale
# Create a kernel to get the halo effect
kernelX = cv2.getGaussianKernel(cols,k)
kernelY = cv2.getGaussianKernel(rows,k)
# generating vignette mask using Gaussian kernels
kernel = kernelY * kernelX.T
# Normalize the kernel
mask = 255 * kernel / np.linalg.norm(kernel)
mask = cv2.GaussianBlur(mask, (51,51), 0)
# Apply the halo to all the channels of the image
img[:,:,0] += img[:,:,0]*mask
img[:,:,1] += img[:,:,1]*mask
img[:,:,2] += img[:,:,2]*mask
img = np.clip(img/2, 0, 255)
# cv2.imshow("mask",mask)
# cv2.waitKey(0)
# cv2.imwrite("results/vignetteMask.jpg", 255*mask)
    return np.uint8(img)
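# --------------------------------------------------------------------------
# NOTE: the original script is truncated here (the argparse-based driver that
# presumably followed is missing). The block below is an illustrative sketch,
# not the original code; the command-line flag, default file name and chosen
# filters are assumptions.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Apply photo filters to an image')
    parser.add_argument('--image', default='sample.jpg', help='path to input image')
    args = parser.parse_args()
    original = cv2.imread(args.image)
    if original is not None:
        cv2.imshow('original', original)
        cv2.imshow('clarendon', clarendon(original))
        cv2.imshow('cartoon', makeCartoon(original))
        cv2.waitKey(0)
        cv2.destroyAllWindows()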
import numpy as np
import pandas as pd
from tqdm import tqdm
import numpy.ma as ma
from scipy.special import gammaln
from pykalman import KalmanFilter
from pynowcasting.pycsminwel import csminwel
class BVARGLP(object):
def __init__(self, data, lags, hz=8, vc=10e6, stationary_prior=None, crit=1e-16,
hyperpriors=True, mnpsi=True, mnalpha=False, sur=True, noc=True,
fcast=False, mcmc=False, ndraws=20000, ndrawsdiscard=None, mcmcconst=1,
mcmcfcast=True, mcmcstorecoef=True, verbose=False):
"""
        This class implements the Bayesian VAR from Giannone, Lenza and Primiceri (2012), hence the name GLP. The main
        idea of the model is to use multiple priors, each with their own hyperprior, in order to generate shrinkage
        behaviour.
        This class only accepts data with a quarterly frequency and with no missing data.
@param hyperpriors: False = no priors on hyperparameters
True = reference priors on hyperparameters (default)
[NOTE: hyperpriors on psi calibrated for data expressed in
4 x logs, such as 4 x log(GDP). Thus if interest rate is in
percentage, divide by 100]
@param vc: prior variance in the MN prior for the coefficients multiplying
                   the constant term (Default: vc=10e6)
@param stationary_prior: names of the variables that enter the VAR in first
differences and for which one might want to set the prior mean
on the coefficient on the first own lag in the MN prior and the
prior mean of the sum-of-coefficients prior to 0 (instead of
the typical 1)
@param mnpsi: False = diagonal elements of the scale matrix of the IW prior on
the covariance of the residuals NOT treated as
hyperparameters (set to the residual variance of an AR(1))
True = diagonal elements of the scale matrix of the IW prior on
the covariance of the residuals treated as hyperparameters (default)
@param mnalpha: False = Lag-decaying parameter of the MN prior set to 2 and
NOT treated as hyperparameter (default)
True = Lag-decaying parameter of the MN prior treated as
hyperparameter
@param sur: False = single-unit-root prior is OFF
True = single-unit-root prior is ON and its std is treated as an
hyperparameter (default)
@param noc: False = no-cointegration (sum-of coefficients) prior is OFF
True = no-cointegration (sum-of coefficients) is ON and its std is
treated as an hyperparameter (default)
@param fcast: False = does not generate forecasts at the posterior mode
True = generates forecasts at the posterior mode (default)
@param hz: number of quarters for which it generates forecasts (default: hz=8)
@param mcmc: False = does not run the MCMC (default)
True = runs the MCMC after the maximization
@param ndraws: number of draws in the MCMC (default: Ndraws=20000)
@param ndrawsdiscard: number of draws initially discarded to allow convergence
                              in the MCMC (default=Ndraws/2)
@param mcmcconst: scaling constant for the MCMC (should be calibrated to achieve
an acceptance rate of approx 25%) (default: MCMCconst=1)
@param mcmcfcast: False = does not generate forecasts when running the MCMC
True = generates forecasts while running the MCMC
(for each draw of the hyperparameters the code takes a
draw of the VAR coefficients and shocks, and generates
forecasts at horizons hz) (default).
@param mcmcstorecoef: False = does not store the MCMC draws of the VAR
coefficients and residual covariance matrix
True = stores the MCMC draws of the VAR coefficients and
residual covariance matrix (default)
@param verbose: Prints relevant information during the estimation.
@param crit: value for convergence criteria
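        Example (illustrative sketch, not from the original docstring; assumes
        `df` is a quarterly pandas DataFrame of suitably transformed series,
        e.g. 4 x log-levels):
            bvar = BVARGLP(df, lags=4, fcast=True, hz=8)
            point_forecast = bvar.forecast   # (hz, n) forecasts at the posterior mode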
"""
assert data.index.inferred_freq == 'Q', "input 'data' must be quarterly and recognized by pandas."
self.data = data
self.lags = lags
self.hyperpriors = hyperpriors
self.vc = vc
self.stationary_prior = stationary_prior
if stationary_prior is None:
self.pos = None
else:
self.pos = [self.data.columns.get_loc(var) for var in stationary_prior]
self.mnalpha = mnalpha
self.mnpsi = mnpsi
self.sur = sur
self.noc = noc
self.fcast = fcast
self.hz = hz
self.mcmc = mcmc
self.ndraws = ndraws
self.ndrwasdiscard = int(ndraws/2) if ndrawsdiscard is None else ndrawsdiscard
self.mcmccosnt = mcmcconst
self.mcmcfcast = mcmcfcast
self.mcmcstorecoef = mcmcstorecoef
self.verbose = verbose
self.crit = crit
self.TT = data.shape[0] # Time-series sample size without lags
self.n = data.shape[1] # Number of variables in the VAR
self.k = self.n * self.lags + 1 # Number of coefficients on each equation
self._set_priors()
self._regressor_matrix_ols()
self._minimization()
if self.fcast:
self._forecasts()
if self.mcmc:
self._mcmc()
def _set_priors(self):
# Sets up the default choices for the priors of the BVAR of Giannone, Lenza and Primiceri (2012)
if self.hyperpriors:
# hyperprior mode
mode_lambda = 0.2
mode_miu = 1
mode_theta = 1
# hyperprior sds
sd_lambda = 0.4
sd_miu = 1
sd_theta = 1
# scale and shape of the IG on psi/(d-n-1)
scalePSI = 0.02 ** 2
priorcoef = pd.DataFrame(index=['lambda', 'miu', 'theta', 'alpha', 'beta'],
columns=['r_k', 'r_theta', 'PSI'])
priorcoef.loc['lambda', 'r_k'], priorcoef.loc['lambda', 'r_theta'] = \
self._gamma_coef(mode_lambda, sd_lambda)
priorcoef.loc['miu', 'r_k'], priorcoef.loc['miu', 'r_theta'] = self._gamma_coef(mode_miu, sd_miu)
priorcoef.loc['theta', 'r_k'], priorcoef.loc['theta', 'r_theta'] = self._gamma_coef(mode_theta, sd_theta)
priorcoef.loc['alpha', 'PSI'] = scalePSI
priorcoef.loc['beta', 'PSI'] = scalePSI
self.priorcoef = priorcoef
else:
self.priorcoef = None
def _regressor_matrix_ols(self):
# purpose is to construct the SS matrix
# Constructs the matrix of regressors
n = self.n
lags = self.lags
data = self.data
x = np.zeros((self.TT, self.k))
x[:, 0] = 1
for i in range(1, self.lags + 1):
x[:, 1 + (i - 1) * n: i * n + 1] = data.shift(i).values
self.y0 = data.iloc[:lags, :].mean().values
self.x = x[lags:, :]
self.y = data.values[lags:, :]
self.T = self.y.shape[0] # Sample size after lags
# OLS for AR(1) residual variance of each equation
SS = np.zeros(self.n)
for i in range(self.n):
y_reg = self.y[1:, i]
x_reg = np.hstack((np.ones((self.T - 1, 1)), self.y[:-1, i].reshape((-1, 1))))
ar1 = OLS1(y_reg, x_reg)
SS[i] = ar1.sig2hatols
self.SS = SS
def _minimization(self):
# Starting values for the minimization
self.lambda0 = 0.2 # std of MN prior
self.theta0 = 1 # std of SUR prior
self.miu0 = 1 # std NOC prior
self.alpha0 = 2 # lag-decaying parameter of the MN prior
self.psi0 = self.SS
# Bounds for the minimization step
self.lambda_min = 0.0001
self.lambda_max = 5
self.alpha_min = 0.1
self.alpha_max = 5
self.theta_min = 0.0001
self.theta_max = 50
self.miu_min = 0.0001
self.miu_max = 50
self.psi_min = self.SS / 100
self.psi_max = self.SS * 100
# Transforming inputs to unbounded and builds the initial guess
x0 = np.array([-np.log((self.lambda_max - self.lambda0) / (self.lambda0 - self.lambda_min))])
if self.mnpsi:
inpsi = -np.log((self.psi_max - self.psi0) / (self.psi0 - self.psi_min))
x0 = np.concatenate((x0, inpsi))
if self.sur:
intheta = np.array([-np.log((self.theta_max - self.theta0) / (self.theta0 - self.theta_min))])
x0 = np.concatenate((x0, intheta))
if self.noc:
inmiu = np.array([-np.log((self.miu_max - self.miu0) / (self.miu0 - self.miu_min))])
x0 = np.concatenate((x0, inmiu))
if self.mnalpha:
inalpha = np.array([-np.log((self.alpha_max - self.alpha0) / (self.alpha0 - self.alpha_min))])
x0 = np.concatenate((x0, inalpha))
# initial guess for the inverse Hessian
H0 = 10 * np.eye(len(x0))
# Minimization of the negative of the posterior of the hyperparameters
def myfun(xxx):
logML, _, _ = self._logmlvar_formin(xxx)
return -logML
# Optimization
fh, xh, gh, h, itct, fcount, retcodeh = csminwel(fcn=myfun,
x0=x0,
h0=H0,
grad=None,
crit=self.crit,
nit=1000,
verbose=self.verbose)
self.itct = itct
self.xh = xh
self.h = h
self.log_post, self.betahat, self.sigmahat = self._logmlvar_formin(xh)
self.lamb = self.lambda_min + (self.lambda_max - self.lambda_min) / (1 + np.exp(-xh[0]))
self.theta = self.theta_max
self.miu = self.miu_max
if self.mnpsi:
# diagonal elements of the scale matrix of the IW prior on the residual variance
self.psi = self.psi_min + (self.psi_max - self.psi_min) / (1 + np.exp(-xh[1:self.n + 1]))
if self.sur:
# std of sur prior at the peak
self.theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-xh[self.n + 1]))
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[self.n + 2]))
else: # self.sur == 0
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[self.n + 1]))
else: # self.mnpsi == 0
self.psi = self.SS
if self.sur:
# std of sur prior at the peak
self.theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-xh[1]))
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[2]))
else:
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[1]))
if not self.mnalpha:
self.alpha = 2
else:
# Lag-decaying parameter of the MN prior
self.alpha = self.alpha_min + (self.alpha_max - self.alpha_min) / (1 + np.exp(-xh[-1]))
def _forecasts(self):
        # Forecasts at the posterior mode
Y = np.vstack([self.y, np.zeros((self.hz, self.n))])
for tau in range(self.hz):
indexes = list(range(self.T + tau - 1, self.T + tau - self.lags - 1, -1))
xT = np.vstack([1, Y[indexes].T.reshape((self.k - 1, 1), order="F")]).T
Y[self.T + tau, :] = xT @ self.betahat
self.forecast = Y[-self.hz:, :]
def _mcmc(self):
# Jacobian of the transformation of the hyperparameters that has been
# used for the constrained maximization
JJ = np.exp(self.xh) / ((1 + np.exp(self.xh)) ** 2)
JJ[0] = (self.lambda_max - self.lambda_min) * JJ[0]
if self.mnpsi:
JJ[1: self.n + 1] = (self.psi_max - self.psi_min) * JJ[1: self.n + 1]
if self.sur:
JJ[self.n + 1] = (self.theta_max - self.theta_min) * JJ[self.n + 1]
if self.noc:
JJ[self.n + 2] = (self.miu_max - self.miu_min) * JJ[self.n + 2]
else:
if self.noc:
JJ[self.n + 1] = (self.miu_max - self.miu_min) * JJ[self.n + 1]
else:
if self.sur:
JJ[1] = (self.theta_max - self.theta_min) * JJ[1]
if self.noc:
JJ[2] = (self.miu_max - self.miu_min) * JJ[2]
else:
if self.noc:
JJ[1] = (self.miu_max - self.miu_min) * JJ[1]
if self.mnalpha:
JJ[-1] = (self.alpha_max - self.alpha_min) * JJ[-1]
JJ = np.diag(JJ)
HH = JJ @ self.h @ JJ
# Regularization to assure that HH is positive-definite
eigval, eigvec = np.linalg.eig(HH)
HH = eigvec @ np.diag(np.abs(eigval)) @ eigvec.T
# recovering the posterior mode
postmode = np.array([self.lamb])
if self.mnpsi:
modepsi = np.array(self.psi)
postmode = np.concatenate((postmode, modepsi))
if self.sur:
modetheta = np.array([self.theta])
postmode = np.concatenate((postmode, modetheta))
if self.noc:
modemiu = np.array([self.miu])
postmode = np.concatenate((postmode, modemiu))
if self.mnalpha:
modealpha = np.array([self.alpha])
postmode = np.concatenate((postmode, modealpha))
# starting value of the Metropolis algorithm
P = np.zeros((self.ndraws, self.xh.shape[0]))
logMLold = -10e15
while logMLold == -10e15:
P[0, :] = np.random.multivariate_normal(mean=postmode,
cov=(self.mcmccosnt ** 2) * HH)
logMLold, betadrawold, sigmadrawold = self._logmlvar_formcmc(P[0])
# matrix to store the draws of the VAR coefficients if MCMCstorecoeff is on
if self.mcmcstorecoef:
mcmc_beta = np.zeros((self.k, self.n, self.ndraws - self.ndrwasdiscard))
mcmc_sigma = np.zeros((self.n, self.n, self.ndraws - self.ndrwasdiscard))
else:
mcmc_beta = None
mcmc_sigma = None
# matrix to store the forecasts if MCMCfcast is on
if self.mcmcfcast:
mcmc_Dforecast = np.zeros((self.hz, self.n, self.ndraws - self.ndrwasdiscard))
else:
mcmc_Dforecast = None
# Metropolis iterations
count = 0
for i in tqdm(range(1, self.ndraws), 'MCMC Iterations', disable=not self.verbose):
# draw candidate value
P[i, :] = np.random.multivariate_normal(mean=P[i - 1, :],
cov=(self.mcmccosnt ** 2) * HH)
logMLnew, betadrawnew, sigmadrawnew = self._logmlvar_formcmc(P[i, :])
if logMLnew > logMLold: # if there is an improvement, accept it
logMLold = logMLnew
count = count + 1
else: # If there is no improvement, there is a chance to accept the draw
                if np.random.rand() < np.exp(logMLnew - logMLold):  # If accepted
logMLold = logMLnew
count = count + 1
else: # If not accepted, overwrite the draw with the last value
P[i, :] = P[i - 1, :]
# if MCMCfcast is on, take a new draw of the VAR coefficients with
# the old hyperparameters if have rejected the new ones
if self.mcmcfcast or self.mcmcstorecoef:
_, betadrawnew, sigmadrawnew = self._logmlvar_formcmc(P[i, :])
# stores draws of VAR coefficients if MCMCstorecoeff is on
if (i >= self.ndrwasdiscard) and self.mcmcstorecoef:
mcmc_beta[:, :, i - self.ndrwasdiscard] = betadrawnew
mcmc_sigma[:, :, i - self.ndrwasdiscard] = sigmadrawnew
# produce and store the forecasts if MCMCfcast is on
if (i >= self.ndrwasdiscard) and self.mcmcfcast:
Y = np.vstack([self.y, np.zeros((self.hz, self.n))])
for tau in range(self.hz):
indexes = list(range(self.T + tau - 1, self.T + tau - self.lags - 1, -1))
xT = np.vstack([1, Y[indexes].T.reshape((self.k - 1, 1), order="F")]).T
Y[self.T + tau, :] = xT @ betadrawnew + np.random.multivariate_normal(mean=np.zeros(self.n),
cov=sigmadrawnew)
mcmc_Dforecast[:, :, i - self.ndrwasdiscard] = Y[-self.hz:, :]
# store the draws of the hyperparameters
        mcmc_lambda = P[self.ndrwasdiscard:, 0]  # Standard Minnesota prior
mcmc_psi = None
mcmc_theta = None
mcmc_miu = None
if self.mnpsi:
# diagonal elements of the scale matrix of the IW prior on the residual variance
            mcmc_psi = P[self.ndrwasdiscard:, 1:self.n + 1]
if self.sur:
# std of sur prior
mcmc_theta = P[self.ndrwasdiscard:, self.n + 1]
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, self.n + 2]
else: # self.sur == 0
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, self.n + 1]
else: # self.mnpsi == 0
if self.sur:
# std of sur prior
mcmc_theta = P[self.ndrwasdiscard:, 1]
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, 2]
else: # self.sur == 0
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, 1]
if self.mnalpha:
# Lag-decaying parameter of the MN prior
mcmc_alpha = P[self.ndrwasdiscard:, -1]
self.mcmc_alpha = mcmc_alpha
mcmc_accrate = np.mean((mcmc_lambda[1:] != mcmc_lambda[:-1]))
# Save the chains as attributes
self.mcmc_beta = mcmc_beta
self.mcmc_sigma = mcmc_sigma
self.mcmc_dforecast = mcmc_Dforecast
self.mcmc_lambda = mcmc_lambda
self.mcmc_psi = mcmc_psi
self.mcmc_theta = mcmc_theta
self.mcmc_miu = mcmc_miu
self.mcmc_accrate = mcmc_accrate
def _logmlvar_formin(self, par):
"""
This function computes the log-posterior (or the logML if hyperpriors=0),
the posterior mode of the coefficients and the covariance matrix of the
residuals of the BVAR of Giannone, Lenza and Primiceri (2012)
"""
# The following avoids the warning "referenced before assignment"
theta = None
miu = None
# hyperparameters
lambda_ = self.lambda_min + (self.lambda_max - self.lambda_min) / (1 + np.exp(-par[0]))
d = self.n + 2
if not self.mnpsi:
psi = self.SS * (d - self.n - 1)
if self.sur:
theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-par[1]))
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[2]))
else:
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[1]))
else:
psi = self.psi_min + (self.psi_max - self.psi_min) / (1 + np.exp(-par[1:self.n + 1]))
if self.sur:
theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-par[self.n + 1]))
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[self.n + 2]))
else:
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[self.n + 1]))
if not self.mnalpha:
alpha = 2
else: # self.mnalpha == 1
alpha = self.alpha_min + (self.alpha_max - self.alpha_min) / (1 + np.exp(-par[-1]))
# Setting up the priors
omega = np.zeros(self.k)
omega[0] = self.vc
for i in range(1, self.lags + 1):
omega[1 + (i - 1) * self.n: 1 + i * self.n] = \
(d - self.n - 1) * (lambda_ ** 2) * (1 / (i ** alpha)) / psi
# Prior scale matrix for the covariance of the shocks
PSI = np.diag(psi)
# dummy observations if sur and / or noc = 1
Td = 0
xdsur = np.array([]).reshape((0, self.k))
ydsur = np.array([]).reshape((0, self.n))
xdnoc = np.array([]).reshape((0, self.k))
ydnoc = np.array([]).reshape((0, self.n))
y = self.y.copy()
x = self.x.copy()
T = self.T
if self.sur:
xdsur = (1 / theta) * np.tile(self.y0, (1, self.lags))
xdsur = np.hstack((np.array([[1 / theta]]), xdsur))
ydsur = (1 / theta) * self.y0
y = np.vstack((y, ydsur))
x = np.vstack((x, xdsur))
Td = Td + 1
if self.noc:
ydnoc = (1 / miu) * np.diag(self.y0)
# Set to zero the prior mean on the first own lag for variables selected in the vector pos
if self.pos is not None:
ydnoc[self.pos, self.pos] = 0
xdnoc = (1 / miu) * np.tile(np.diag(self.y0), (1, self.lags))
xdnoc = np.hstack((np.zeros((self.n, 1)), xdnoc))
y = np.vstack((y, ydnoc))
x = np.vstack((x, xdnoc))
Td = Td + self.n
T = T + Td
# ===== OUTPUT ===== #
# Minnesota prior mean
b = np.zeros((self.k, self.n))
diagb = np.ones(self.n)
# Set to zero the prior mean on the first own lag for variables selected in the vector pos
if self.pos is not None:
diagb[self.pos] = 0
b[1:self.n + 1, :] = np.diag(diagb)
# posterior mode of the VAR coefficients
matA = x.T @ x + np.diag(1 / omega)
matB = x.T @ y + np.diag(1 / omega) @ b
        betahat = np.linalg.solve(matA, matB)  # np.linalg.solve runs more efficiently than inverting a gigantic matrix
# VAR residuals
epshat = y - x @ betahat
# Posterior mode of the covariance matrix
sigmahat = (epshat.T @ epshat + PSI + (betahat - b).T @ np.diag(1 / omega) @ (betahat - b))
sigmahat = sigmahat / (T + d + self.n + 1)
# logML
aaa = np.diag(np.sqrt(omega)) @ x.T @ x @ np.diag(np.sqrt(omega))
bbb = np.diag(1 / np.sqrt(psi)) @ (epshat.T @ epshat + (betahat - b).T @ np.diag(1/omega) @
(betahat-b)) @ np.diag(1 / np.sqrt(psi))
eigaaa = np.linalg.eig(aaa)[0].real
eigaaa[eigaaa < 1e-12] = 0
eigaaa = eigaaa + 1
eigbbb = np.linalg.eig(bbb)[0].real
eigbbb[eigbbb < 1e-12] = 0
eigbbb = eigbbb + 1
logML = - self.n * T * np.log(np.pi) / 2
logML = logML + sum(gammaln((T + d - np.arange(self.n)) / 2) - gammaln((d - np.arange(self.n)) / 2))
logML = logML - T * sum(np.log(psi)) / 2
logML = logML - self.n * sum(np.log(eigaaa)) / 2
logML = logML - (T + d) * sum(np.log(eigbbb)) / 2
if self.sur or self.noc:
yd = np.vstack((ydsur, ydnoc))
xd = np.vstack((xdsur, xdnoc))
# prior mode of the VAR coefficients
betahatd = b
# VAR residuals at the prior mode
epshatd = yd - xd @ betahatd
aaa = np.diag(np.sqrt(omega)) @ xd.T @ xd @ np.diag(np.sqrt(omega))
bbb = np.diag(1 / np.sqrt(psi)) @ (epshatd.T @ epshatd + (betahatd - b).T @ np.diag(1 / omega) @
(betahatd - b)) @ np.diag(1 / np.sqrt(psi))
eigaaa = np.linalg.eig(aaa)[0].real
eigaaa[eigaaa < 1e-12] = 0
eigaaa = eigaaa + 1
eigbbb = np.linalg.eig(bbb)[0].real
eigbbb[eigbbb < 1e-12] = 0
eigbbb = eigbbb + 1
# normalizing constant
norm = - self.n * Td * np.log(np.pi) / 2
norm = norm + sum(gammaln((Td + d - np.arange(self.n)) / 2) - gammaln((d - np.arange(self.n)) / 2))
norm = norm - Td * sum(np.log(psi)) / 2
import batoid
import numpy as np
from test_helpers import timer, init_gpu, rays_allclose, checkAngle, do_pickle
@timer
def test_properties():
rng = np.random.default_rng(5)
size = 10
for i in range(100):
x = rng.normal(size=size)
y = rng.normal(size=size)
z = rng.normal(size=size)
vx = rng.normal(size=size)
vy = rng.normal(size=size)
vz = rng.normal(size=size)
t = rng.normal(size=size)
w = rng.normal(size=size)
fx = rng.normal(size=size)
vig = rng.choice([True, False], size=size)
fa = rng.choice([True, False], size=size)
cs = batoid.CoordSys(
origin=rng.normal(size=3),
rot=batoid.RotX(rng.normal())@batoid.RotY(rng.normal())
)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, w, fx, vig, fa, cs)
np.testing.assert_array_equal(rv.x, x)
np.testing.assert_array_equal(rv.y, y)
np.testing.assert_array_equal(rv.z, z)
np.testing.assert_array_equal(rv.r[:, 0], x)
np.testing.assert_array_equal(rv.r[:, 1], y)
np.testing.assert_array_equal(rv.r[:, 2], z)
np.testing.assert_array_equal(rv.vx, vx)
np.testing.assert_array_equal(rv.vy, vy)
np.testing.assert_array_equal(rv.vz, vz)
np.testing.assert_array_equal(rv.v[:, 0], vx)
np.testing.assert_array_equal(rv.v[:, 1], vy)
np.testing.assert_array_equal(rv.v[:, 2], vz)
np.testing.assert_array_equal(rv.k[:, 0], rv.kx)
np.testing.assert_array_equal(rv.k[:, 1], rv.ky)
np.testing.assert_array_equal(rv.k[:, 2], rv.kz)
np.testing.assert_array_equal(rv.t, t)
np.testing.assert_array_equal(rv.wavelength, w)
np.testing.assert_array_equal(rv.flux, fx)
np.testing.assert_array_equal(rv.vignetted, vig)
np.testing.assert_array_equal(rv.failed, fa)
assert rv.coordSys == cs
rv._syncToDevice()
do_pickle(rv)
@timer
def test_positionAtTime():
rng = np.random.default_rng(57)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
# Try with default t=0 first
rv = batoid.RayVector(x, y, z, vx, vy, vz)
np.testing.assert_equal(rv.x, x)
np.testing.assert_equal(rv.y, y)
np.testing.assert_equal(rv.z, z)
np.testing.assert_equal(rv.vx, vx)
np.testing.assert_equal(rv.vy, vy)
np.testing.assert_equal(rv.vz, vz)
np.testing.assert_equal(rv.t, 0.0)
np.testing.assert_equal(rv.wavelength, 0.0)
for t1 in [0.0, 1.0, -1.1, 2.5]:
np.testing.assert_equal(
rv.positionAtTime(t1),
rv.r + t1 * rv.v
)
# Now add some random t's
t = rng.uniform(-1.0, 1.0, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t)
np.testing.assert_equal(rv.x, x)
np.testing.assert_equal(rv.y, y)
np.testing.assert_equal(rv.z, z)
np.testing.assert_equal(rv.vx, vx)
np.testing.assert_equal(rv.vy, vy)
np.testing.assert_equal(rv.vz, vz)
np.testing.assert_equal(rv.t, t)
np.testing.assert_equal(rv.wavelength, 0.0)
for t1 in [0.0, 1.4, -1.3, 2.1]:
np.testing.assert_equal(
rv.positionAtTime(t1),
rv.r + rv.v*(t1-rv.t)[:,None]
)
@timer
def test_propagate():
rng = np.random.default_rng(577)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
# Try with default t=0 first
rv = batoid.RayVector(x, y, z, vx, vy, vz)
for t1 in [0.0, 1.0, -1.1, 2.5]:
rvcopy = rv.copy()
r1 = rv.positionAtTime(t1)
rvcopy.propagate(t1)
np.testing.assert_equal(
rvcopy.r,
r1
)
np.testing.assert_equal(
rvcopy.v,
rv.v
)
np.testing.assert_equal(
rvcopy.t,
t1
)
# Now add some random t's
t = rng.uniform(-1.0, 1.0, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t)
for t1 in [0.0, 1.0, -1.1, 2.5]:
rvcopy = rv.copy()
r1 = rv.positionAtTime(t1)
rvcopy.propagate(t1)
np.testing.assert_equal(
rvcopy.r,
r1
)
np.testing.assert_equal(
rvcopy.v,
rv.v
)
np.testing.assert_equal(
rvcopy.t,
t1
)
@timer
def test_phase():
rng = np.random.default_rng(5772)
size = 10_000
for n in [1.0, 1.3]:
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0/(n*n) - vx*vx - vy*vy)
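# vz is chosen so that |v| = 1/n, the ray speed inside a medium of refractive index n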
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, wavelength)
# First explicitly check that phase is 0 at position and time of individual
# rays
for i in rng.choice(size, size=10):
np.testing.assert_equal(
rv.phase(rv.r[i], rv.t[i])[i],
0.0
)
# Now use actual formula
# phi = k.(r-r0) - (t-t0)omega
# k = 2 pi v / (lambda |v|^2)
# omega = 2 pi / lambda
# |v| = 1 / n
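# With |v| = 1/n the first term becomes (2 pi n^2 / lambda) v.(r - r0), which is what
# the einsum and the scalings below compute.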
for r1, t1 in [
((0, 0, 0), 0),
((0, 1, 2), 3),
((-1, 2, 4), -1),
((0, 1, -4), -2)
]:
phi = np.einsum("ij,ij->i", rv.v, r1-rv.r)
phi *= n*n
phi -= (t1-rv.t)
phi *= 2*np.pi/wavelength
np.testing.assert_allclose(
rv.phase(r1, t1),
phi,
rtol=0,
atol=1e-7
)
for i in rng.choice(size, size=10):
s = slice(i, i+1)
rvi = batoid.RayVector(
x[s], y[s], z[s],
vx[s], vy[s], vz[s],
t[s].copy(), wavelength[s].copy()
)
# Move integer number of wavelengths ahead
ti = rvi.t[0]
wi = rvi.wavelength[0]
r1 = rvi.positionAtTime(ti + 5123456789*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=2e-5)
# Half wavelength
r1 = rvi.positionAtTime(ti + 6987654321.5*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, -1.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=2e-5)
# Quarter wavelength
r1 = rvi.positionAtTime(ti + 0.25*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 1.0, rtol=0, atol=2e-5)
# Three-quarters wavelength
r1 = rvi.positionAtTime(ti + 7182738495.75*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, -1.0, rtol=0, atol=2e-5)
# We can also keep the position the same and change the time in
# half/quarter integer multiples of the period.
a = rvi.amplitude(rvi.r[0], rvi.t[0]+5e9*wi)
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+5.5)*wi)
np.testing.assert_allclose(a.real, -1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+2.25)*wi)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, -1.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+1.75)*wi)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 1.0, rtol=0, atol=1e-5)
# If we pick a point anywhere along a vector originating at the ray
# position, but orthogonal to its direction of propagation, then we
# should get phase = 0 (mod 2pi).
v1 = np.array([1.0, 0.0, 0.0])
v1 = np.cross(rvi.v[0], v1)
p1 = rvi.r[0] + v1
a = rvi.amplitude(p1, rvi.t[0])
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
@timer
def test_sumAmplitude():
import time
rng = np.random.default_rng(57721)
size = 10_000
for n in [1.0, 1.3]:
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0/(n*n) - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, wavelength)
satime = 0
atime = 0
for r1, t1 in [
((0, 0, 0), 0),
((0, 1, 2), 3),
((-1, 2, 4), -1),
((0, 1, -4), -2)
]:
at0 = time.time()
s1 = rv.sumAmplitude(r1, t1)
at1 = time.time()
s2 = np.sum(rv.amplitude(r1, t1))
at2 = time.time()
np.testing.assert_allclose(s1, s2, rtol=0, atol=1e-11)
satime += at1-at0
atime += at2-at1
# print(f"sumAplitude() time: {satime}")
# print(f"np.sum(amplitude()) time: {atime}")
@timer
def test_equals():
import time
rng = np.random.default_rng(577215)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
flux = rng.uniform(0.9, 1.1, size=size)
vignetted = rng.choice([True, False], size=size)
failed = rng.choice([True, False], size=size)
args = x, y, z, vx, vy, vz, t, wavelength, flux, vignetted, failed
rv = batoid.RayVector(*args)
rv2 = rv.copy()
assert rv == rv2
for i in range(len(args)):
newargs = [args[i].copy() for i in range(len(args))]
ai = newargs[i]
if ai.dtype == float:
ai[0] = 1.2+ai[0]*3.45
elif ai.dtype == bool:
ai[0] = not ai[0]
# else panic!
rv2 = batoid.RayVector(*newargs)
assert rv != rv2
# Repeat, but force comparison on device
rv2 = rv.copy()
rv._rv.x.syncToDevice()
rv._rv.y.syncToDevice()
rv._rv.z.syncToDevice()
rv._rv.vx.syncToDevice()
rv._rv.vy.syncToDevice()
rv._rv.vz.syncToDevice()
rv._rv.t.syncToDevice()
rv._rv.wavelength.syncToDevice()
rv._rv.flux.syncToDevice()
rv._rv.vignetted.syncToDevice()
rv._rv.failed.syncToDevice()
assert rv == rv2
for i in range(len(args)):
newargs = [args[i].copy() for i in range(len(args))]
ai = newargs[i]
if ai.dtype == float:
ai[0] = 1.2+ai[0]*3.45
elif ai.dtype == bool:
ai[0] = not ai[0]
# else panic!
rv2 = batoid.RayVector(*newargs)
assert rv != rv2
@timer
def test_asGrid():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
nx = 1
while (nx%2) == 1:
nx = rng.integers(10, 21)
lx = rng.uniform(1.0, 10.0)
dx = lx/(nx-2)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
# Some things that should be equivalent
grid1 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=lx, dirCos=dirCos
)
grid2 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, dx=dx, dirCos=dirCos
)
grid3 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
dx=dx, lx=lx, dirCos=dirCos
)
grid4 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0.0), dirCos=dirCos
)
theta_x, theta_y = batoid.utils.dirCosToField(*dirCos)
grid5 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0.0), theta_x=theta_x, theta_y=theta_y
)
rays_allclose(grid1, grid2)
rays_allclose(grid1, grid3)
rays_allclose(grid1, grid4)
rays_allclose(grid1, grid5)
# Check distance to chief ray
cridx = (nx//2)*nx+nx//2
obs_dist = np.sqrt(np.dot(grid1.r[cridx], grid1.r[cridx]))
np.testing.assert_allclose(obs_dist, backDist)
np.testing.assert_allclose(grid1.t, 0)
np.testing.assert_allclose(grid1.wavelength, wavelength)
np.testing.assert_allclose(grid1.vignetted, False)
np.testing.assert_allclose(grid1.failed, False)
np.testing.assert_allclose(grid1.vx, dirCos[0])
np.testing.assert_allclose(grid1.vy, dirCos[1])
np.testing.assert_allclose(grid1.vz, dirCos[2])
# Check distribution of points propagated to entrance pupil
pupil = batoid.Plane()
pupil.intersect(grid1)
np.testing.assert_allclose(np.diff(grid1.x)[0], dx)
np.testing.assert_allclose(np.diff(grid1.y)[0], 0, atol=1e-14)
np.testing.assert_allclose(np.diff(grid1.x)[nx-1], -dx*(nx-1))
np.testing.assert_allclose(np.diff(grid1.y)[nx-1], dx)
# Another set, but with odd nx
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
while (nx%2) == 0:
nx = rng.integers(10, 21)
lx = rng.uniform(1.0, 10.0)
dx = lx/(nx-1)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
grid1 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=lx, dirCos=dirCos
)
grid2 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, dx=dx, dirCos=dirCos
)
grid3 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0), dirCos=dirCos
)
# ... but the following is not equivalent, since default is to always
# infer an even nx and ny
# grid4 = batoid.RayVector.asGrid(
# backDist=backDist, wavelength=wavelength,
# dx=1/9, lx=1.0, dirCos=dirCos
# )
rays_allclose(grid1, grid2)
rays_allclose(grid1, grid3)
cridx = (nx*nx-1)//2
obs_dist = np.sqrt(np.dot(grid1.r[cridx], grid1.r[cridx]))
np.testing.assert_allclose(obs_dist, backDist)
np.testing.assert_allclose(grid1.t, 0)
np.testing.assert_allclose(grid1.wavelength, wavelength)
np.testing.assert_allclose(grid1.vignetted, False)
np.testing.assert_allclose(grid1.failed, False)
np.testing.assert_allclose(grid1.vx, dirCos[0])
np.testing.assert_allclose(grid1.vy, dirCos[1])
np.testing.assert_allclose(grid1.vz, dirCos[2])
# Check distribution of points propagated to entrance pupil
pupil = batoid.Plane()
pupil.intersect(grid1)
np.testing.assert_allclose(np.diff(grid1.x)[0], dx)
np.testing.assert_allclose(np.diff(grid1.y)[0], 0, atol=1e-14)
np.testing.assert_allclose(np.diff(grid1.x)[nx-1], -dx*(nx-1))
np.testing.assert_allclose(np.diff(grid1.y)[nx-1], dx)
for _ in range(10):
# Check nrandom
rays = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
lx=1.0, nx=1,
nrandom=1000, dirCos=dirCos
)
np.testing.assert_allclose(rays.t, 0)
np.testing.assert_allclose(rays.wavelength, wavelength)
np.testing.assert_allclose(rays.vignetted, False)
np.testing.assert_allclose(rays.failed, False)
np.testing.assert_allclose(rays.vx, dirCos[0])
np.testing.assert_allclose(rays.vy, dirCos[1])
np.testing.assert_allclose(rays.vz, dirCos[2])
# Check that projected points are inside region
pupil = batoid.Plane()
pupil.intersect(rays)
np.testing.assert_allclose(rays.z, 0.0)
np.testing.assert_array_less(rays.x, 0.5)
np.testing.assert_array_less(rays.y, 0.5)
np.testing.assert_array_less(-0.5, rays.x)
np.testing.assert_array_less(-0.5, rays.y)
assert len(rays) == 1000
@timer
def test_asPolar():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
inner = rng.uniform(1.0, 3.0)
outer = inner + rng.uniform(1.0, 3.0)
nrad = rng.integers(1, 11)
naz = rng.integers(10, 21)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
rays = batoid.RayVector.asPolar(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
nrad=nrad, naz=naz,
dirCos=dirCos
)
np.testing.assert_allclose(rays.t, 0)
np.testing.assert_allclose(rays.wavelength, wavelength)
np.testing.assert_allclose(rays.vignetted, False)
np.testing.assert_allclose(rays.failed, False)
np.testing.assert_allclose(rays.vx, dirCos[0])
np.testing.assert_allclose(rays.vy, dirCos[1])
np.testing.assert_allclose(rays.vz, dirCos[2])
assert len(rays)%6 == 0
# If we set inner=0, then last ray should
# intersect the center of the pupil
inner = 0.0
rays = batoid.RayVector.asPolar(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
nrad=nrad, naz=naz,
dirCos=dirCos
)
assert len(rays)%6 == 1
pupil = batoid.Plane()
pupil.intersect(rays)
np.testing.assert_allclose(rays.x[-1], 0, atol=1e-14)
np.testing.assert_allclose(rays.y[-1], 0, atol=1e-14)
np.testing.assert_allclose(rays.z[-1], 0, atol=1e-14)
@timer
def test_asSpokes():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
inner = rng.uniform(1.0, 3.0)
outer = inner + rng.uniform(1.0, 3.0)
rings = rng.integers(1, 11)
spokes = rng.integers(10, 21)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
spokes=spokes, rings=rings,
dirCos=dirCos
)
np.testing.assert_allclose(rays.t, 0)
np.testing.assert_allclose(rays.wavelength, wavelength)
np.testing.assert_allclose(rays.vignetted, False)
np.testing.assert_allclose(rays.failed, False)
np.testing.assert_allclose(rays.vx, dirCos[0])
np.testing.assert_allclose(rays.vy, dirCos[1])
np.testing.assert_allclose(rays.vz, dirCos[2])
assert len(rays) == spokes*rings
pupil = batoid.Plane()
pupil.intersect(rays)
radii = np.hypot(rays.x, rays.y)
ths = np.arctan2(rays.y, rays.x)
for i in range(spokes):
np.testing.assert_allclose(
radii[rings*i:rings*(i+1)],
np.linspace(inner, outer, rings, endpoint=True)
)
for i in range(rings):
checkAngle(ths[i::rings], np.linspace(0, 2*np.pi, spokes, endpoint=False))
# Check explicit rings and spokes
rings = rng.uniform(inner, outer, rings)
spokes = rng.uniform(0, 2*np.pi, spokes)
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=outer, inner=inner,
rings=rings, spokes=spokes,
dirCos=dirCos
)
pupil = batoid.Plane()
pupil.intersect(rays)
radii = np.hypot(rays.x, rays.y)
ths = np.arctan2(rays.y, rays.x)
for i in range(len(spokes)):
np.testing.assert_allclose(
radii[len(rings)*i:len(rings)*(i+1)],
rings
)
for i in range(len(rings)):
checkAngle(
ths[i::len(rings)],
spokes
)
# Check Gaussian Quadrature
rings = rng.integers(5, 11)
spokes = 2*rings+1
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=outer,
rings=rings,
spacing='GQ',
dirCos=dirCos
)
assert len(rays) == spokes*rings
pupil = batoid.Plane()
pupil.intersect(rays)
radii = np.hypot(rays.x, rays.y)
ths = np.arctan2(rays.y, rays.x)
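# Expected GQ layout: Gauss-Legendre nodes x on [-1, 1] map to radii
# r = outer*sqrt((1+x)/2), i.e. the nodes are uniform in r^2 over the pupil disc.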
Li, w = np.polynomial.legendre.leggauss(rings)
rings = np.sqrt((1+Li)/2)*outer
flux = w*np.pi/(2*spokes)
spokes = np.linspace(0, 2*np.pi, spokes, endpoint=False)
for i in range(len(spokes)):
np.testing.assert_allclose(
radii[len(rings)*i:len(rings)*(i+1)],
rings
)
np.testing.assert_allclose(
rays.flux[len(rings)*i:len(rings)*(i+1)],
flux
)
for i in range(len(rings)):
checkAngle(
ths[i::len(rings)],
spokes
)
# Sanity check GQ grids against literature
# Values from Forbes JOSA Vol. 5, No. 11 (1988) Table 1
rings = [1, 2, 3, 4, 5, 6]
rad = [
[0.70710678],
[0.45970084, 0.88807383],
[0.33571069, 0.70710678, 0.94196515],
[0.26349923, 0.57446451, 0.81852949, 0.96465961],
[0.21658734, 0.48038042, 0.70710678, 0.87706023, 0.97626324],
[0.18375321, 0.41157661, 0.61700114, 0.78696226, 0.91137517, 0.98297241]
]
w = [
[0.5],
[0.25, 0.25],
[0.13888889, 0.22222222, 0.13888889],
[0.08696371, 0.16303629, 0.16303629, 0.08696371],
[0.05923172, 0.11965717, 0.14222222, 0.11965717, 0.05923172],
[0.04283112, 0.09019039, 0.11697848, 0.11697848, 0.09019039, 0.04283112]
]
for rings_, rad_, w_ in zip(rings, rad, w):
rays = batoid.RayVector.asSpokes(
backDist=backDist, wavelength=wavelength,
outer=1,
rings=rings_,
spacing='GQ',
dirCos=[0,0,-1]
)
spokes = rings_*2+1
radii = np.hypot(rays.x, rays.y)
for i in range(spokes):
np.testing.assert_allclose(
radii[rings_*i:rings_*(i+1)],
rad_
)
np.testing.assert_allclose(
rays.flux[rings_*i:rings_*(i+1)]*spokes/(2*np.pi),
w_
)
@timer
def test_factory_optic():
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
grid1 = batoid.RayVector.asGrid(
optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
nx=16
)
grid2 = batoid.RayVector.asGrid(
wavelength=500e-9, theta_x=0.1, theta_y=0.1,
backDist=telescope.backDist, stopSurface=telescope.stopSurface,
medium=telescope.inMedium, lx=telescope.pupilSize,
nx=16
)
rays_allclose(grid1, grid2)
grid1 = batoid.RayVector.asPolar(
optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
naz=100, nrad=20
)
grid2 = batoid.RayVector.asPolar(
wavelength=500e-9, theta_x=0.1, theta_y=0.1,
backDist=telescope.backDist, stopSurface=telescope.stopSurface,
medium=telescope.inMedium, outer=telescope.pupilSize/2,
inner=telescope.pupilSize/2*telescope.pupilObscuration,
naz=100, nrad=20
)
rays_allclose(grid1, grid2)
grid1 = batoid.RayVector.asSpokes(
optic=telescope, wavelength=500e-9, theta_x=0.1, theta_y=0.1,
rings=10, spokes=21
)
grid2 = batoid.RayVector.asSpokes(
wavelength=500e-9, theta_x=0.1, theta_y=0.1,
backDist=telescope.backDist, stopSurface=telescope.stopSurface,
medium=telescope.inMedium, outer=telescope.pupilSize/2,
rings=10, spokes=21
)
rays_allclose(grid1, grid2)
@timer
def test_getitem():
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
rv = batoid.RayVector.asPolar(
optic=telescope, wavelength=625e-9,
theta_x=np.deg2rad(1.0), theta_y=np.deg2rad(0.2),
nrad=10, naz=60
)
telescope.trace(rv)
# Single item indexing
for i in range(-len(rv), len(rv)):
rv1 = rv[i]
np.testing.assert_equal(rv1.r[0], rv.r[i])
np.testing.assert_equal(rv1.x[0], rv.x[i])
np.testing.assert_equal(rv1.y[0], rv.y[i])
np.testing.assert_equal(rv1.z[0], rv.z[i])
np.testing.assert_equal(rv1.v[0], rv.v[i])
np.testing.assert_equal(rv1.vx[0], rv.vx[i])
np.testing.assert_equal(rv1.vy[0], rv.vy[i])
np.testing.assert_equal(rv1.vz[0], rv.vz[i])
np.testing.assert_equal(rv1.t[0], rv.t[i])
np.testing.assert_equal(rv1.wavelength[0], rv.wavelength[i])
np.testing.assert_equal(rv1.flux[0], rv.flux[i])
np.testing.assert_equal(rv1.vignetted[0], rv.vignetted[i])
np.testing.assert_equal(rv1.failed[0], rv.failed[i])
assert rv1.r.flags.f_contiguous
assert rv1.v.flags.f_contiguous
# slice indexing
for i in range(-len(rv)//10, len(rv)//10):
slc = slice(i*10, (i+1)*10, 2)
rv2 = rv[slc]
np.testing.assert_equal(rv2.r, rv.r[slc])
np.testing.assert_equal(rv2.x, rv.x[slc])
np.testing.assert_equal(rv2.y, rv.y[slc])
np.testing.assert_equal(rv2.z, rv.z[slc])
np.testing.assert_equal(rv2.v, rv.v[slc])
np.testing.assert_equal(rv2.vx, rv.vx[slc])
np.testing.assert_equal(rv2.vy, rv.vy[slc])
np.testing.assert_equal(rv2.vz, rv.vz[slc])
np.testing.assert_equal(rv2.t, rv.t[slc])
np.testing.assert_equal(rv2.wavelength, rv.wavelength[slc])
np.testing.assert_equal(rv2.flux, rv.flux[slc])
np.testing.assert_equal(rv2.vignetted, rv.vignetted[slc])
np.testing.assert_equal(rv2.failed, rv.failed[slc])
assert rv2.r.flags.f_contiguous
assert rv2.v.flags.f_contiguous
# integer array indexing
idx = [0, -1, 1, -2, 2, -3, 50]
rv3 = rv[idx]
np.testing.assert_equal(rv3.r, rv.r[idx])
np.testing.assert_equal(rv3.x, rv.x[idx])
np.testing.assert_equal(rv3.y, rv.y[idx])
np.testing.assert_equal(rv3.z, rv.z[idx])
np.testing.assert_equal(rv3.v, rv.v[idx])
np.testing.assert_equal(rv3.vx, rv.vx[idx])
np.testing.assert_equal(rv3.vy, rv.vy[idx])
np.testing.assert_equal(rv3.vz, rv.vz[idx])
np.testing.assert_equal(rv3.t, rv.t[idx])
np.testing.assert_equal(rv3.wavelength, rv.wavelength[idx])
np.testing.assert_equal(rv3.flux, rv.flux[idx])
np.testing.assert_equal(rv3.vignetted, rv.vignetted[idx])
np.testing.assert_equal(rv3.failed, rv.failed[idx])
import argparse
import os
from tqdm import tqdm
import numpy as np
import collections
parser = argparse.ArgumentParser()
parser.add_argument('result_head', type=str, default=None)
parser.add_argument('--runs', type=int, default=10)
args = parser.parse_args()
test_mode = 'pred_novel'
acc = collections.defaultdict(list)
err = collections.defaultdict(list)
errors_all = collections.defaultdict(list)
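# Acc: fraction of predictions with error <= 30 (presumably degrees); MedErr: median error per class.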
times = np.arange(1, args.runs + 1).tolist()
mean_acc = []
mean_err = []
for i in tqdm(times):
result_dir = os.path.join('{}_run{}'.format(args.result_head, i), test_mode)
test_classes = sorted(
[name.split('.')[0].replace('results_', '') for name in os.listdir(result_dir) if name.endswith('.npz')])
for cls in test_classes:
errors = np.load(os.path.join(result_dir, 'results_{}.npz'.format(cls)))['errors']
acc[cls].append(np.mean(errors <= 30))
err[cls].append(np.median(errors))
errors_all[i] = errors_all[i] + errors.tolist()
mean_acc.append(np.mean([acc[cls][i - 1] for cls in acc.keys()]))
mean_err.append(np.mean([err[cls][i - 1] for cls in acc.keys()]))
# perf for each cls averaged over multiple runs
for cls in acc.keys():
print('Class {}: Acc is {:.2f}; MedErr is {:.1f}'.format(cls, np.mean(acc[cls]), np.mean(err[cls])))
# perf across the whole dataset averaged over multiple runs
all_acc = [np.mean(np.array(errors_all[i]) <= 30) for i in times]
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
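# additions should saturate at the uint8 value range [0, 255] rather than wrap around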
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
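# a value tuple resamples per call: the stochastic augmenter should change between
# iterations, while its deterministic copy should produce identical output every time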
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
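# with per_channel=0.5, roughly half of the 400 trials should sample per-channel values
# (two unique values in the output) and the rest a single shared value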
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10)
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class AdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
# no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
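# collect 1000 single-pixel samples; clipping to uint8 should yield at least one 0 and
# the empirical std should be on the order of the requested scale (0.2 * 255)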
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74 # loc=0.25 should be around 255*0.25=64 average
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
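# The following test class covers iaa.Dropout: p=0 keeps images intact, p=1
# zeroes everything, p=0.5 drops roughly half of the pixels, tuple/list/
# stochastic values for p are sampled per call, bad types raise, and heatmaps
# and keypoints are unaffected.
# Minimal usage sketch (illustrative values, not taken from these tests):
#   aug = iaa.Dropout(p=(0.0, 0.05), per_channel=0.5)
#   image_aug = aug(image=image)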
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
# no dropout, shouldn't change anything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# 100% dropout, should drop everything
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_tuple_as_p(self):
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_list_as_p(self):
aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
nb_seen = [0, 0, 0, 0]
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
n_dropped = np.sum(observed_aug == 0)
p_observed = n_dropped / observed_aug.size
if 0 <= p_observed <= 0.01:
nb_seen[0] += 1
elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
nb_seen[1] += 1
elif 1.0-0.01 <= p_observed <= 1.0:
nb_seen[2] += 1
else:
nb_seen[3] += 1
assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
assert nb_seen[3] < 30
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for wrong parameter datatype
got_exception = False
try:
_aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Dropout(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.Dropout(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
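# The following test class covers iaa.CoarseDropout: p=0 is a no-op, p=1 drops
# everything, coarse masks can be defined via size_px or size_percent,
# per_channel behaviour, stochastic p, error handling when both size_px and
# size_percent are None, and heatmap invariance.
# Minimal usage sketch (illustrative values, not taken from these tests):
#   aug = iaa.CoarseDropout(p=0.1, size_percent=0.02, per_channel=True)
#   image_aug = aug(image=image)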
class TestCoarseDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
def test_p_is_one(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
def test_p_is_50_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_size_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_per_channel(self):
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for bad parameters
got_exception = False
try:
_ = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test___init___size_px_and_size_percent_both_none(self):
got_exception = False
try:
_ = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseDropout(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseDropout(p=0.5, size_px=10, per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10, shape=(40, 40, 3))
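# The following test class covers iaa.Dropout2d, which drops entire channels:
# constructor defaults, behaviour at p=0 and p=1 for images, heatmaps,
# segmentation maps and coordinate-based augmentables, the nb_keep_channels
# guarantee, zero-sized axes, and bool/int/float dtypes.
# Minimal usage sketch (illustrative values, not taken from these tests):
#   aug = iaa.Dropout2d(p=0.1, nb_keep_channels=1)
#   image_aug = aug(image=image)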
class TestDropout2d(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Dropout2d(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 1
def test___init___p_is_float(self):
aug = iaa.Dropout2d(p=0.7)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 0.3)
assert aug.nb_keep_channels == 1
def test___init___nb_keep_channels_is_int(self):
aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 2
def test_no_images_in_batch(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
heatmaps = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=heatmaps)
assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_1_heatmaps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_1_segmentation_maps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_1_cbaois__keep_one_channel(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_heatmaps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075(self):
image = np.full((1, 1, 3000), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
image_aug = aug(image=image)
nb_kept = np.sum(image_aug == 255)
nb_dropped = image.shape[2] - nb_kept
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.isclose(nb_dropped, image.shape[2]*0.75, atol=75)
def test_force_nb_keep_channels(self):
image = np.full((1, 1, 3), 255, dtype=np.uint8)
images = np.array([image] * 1000)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
images_aug = aug(images=images)
ids_kept = [np.nonzero(image[0, 0, :]) for image in images_aug]
ids_kept_uq = np.unique(ids_kept)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
# on average, keep 1 of 3 channels
# due to p=1.0 we expect to get exactly 2/3 dropped
assert np.isclose(nb_dropped,
(len(images)*images.shape[3])*(2/3), atol=1)
# every channel dropped at least once, i.e. which one is kept is random
assert sorted(ids_kept_uq.tolist()) == [0, 1, 2]
def test_some_images_below_nb_keep_channels(self):
image_2c = np.full((1, 1, 2), 255, dtype=np.uint8)
image_3c = np.full((1, 1, 3), 255, dtype=np.uint8)
images = [image_2c if i % 2 == 0 else image_3c
for i in sm.xrange(100)]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=2)
images_aug = aug(images=images)
for i, image_aug in enumerate(images_aug):
assert np.sum(image_aug == 255) == 2
if i % 2 == 0:
assert np.sum(image_aug == 0) == 0
else:
assert np.sum(image_aug == 0) == 1
def test_all_images_below_nb_keep_channels(self):
image = np.full((1, 1, 2), 255, dtype=np.uint8)
images = np.array([image] * 100)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert nb_dropped == 0
def test_get_parameters(self):
aug = iaa.Dropout2d(p=0.7, nb_keep_channels=2)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert np.isclose(params[0].p.value, 0.3)
assert params[1] == 2
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 255, dtype=np.uint8)
aug = iaa.Dropout2d(1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if value == 0:
assert np.sum(image_aug == value) == 10
else:
assert np.sum(image_aug == value) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if _isclose(value, 0.0):
assert np.sum(_isclose(image_aug, value)) == 10
else:
assert (
np.sum(_isclose(image_aug, np.float128(value)))
== 3)
assert np.sum(image_aug == 0) == 7
def test_pickleable(self):
aug = iaa.Dropout2d(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(1, 1, 50))
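# The following test class covers iaa.TotalDropout, which drops entire images:
# behaviour at p=0 and p=1 for single images, image lists/arrays, heatmaps,
# segmentation maps and coordinate-based augmentables, the expected drop rate
# at p=0.75, unusual channel counts, zero-sized axes and non-uint8 dtypes.
# Minimal usage sketch (illustrative value, not taken from these tests):
#   aug = iaa.TotalDropout(p=0.1)
#   images_aug = aug(images=images)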
class TestTotalDropout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p(self):
aug = iaa.TotalDropout(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.sum(images_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=1.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_heatmaps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=0.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075_multiple_images_list(self):
images = [np.full((1, 1, 1), 255, dtype=np.uint8)] * 3000
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum([np.sum(image_aug == 255) for image_aug in images_aug])
nb_dropped = len(images) - nb_kept
for image_aug in images_aug:
assert image_aug.shape == images[0].shape
assert image_aug.dtype.name == images[0].dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_p_is_075_multiple_images_array(self):
images = np.full((3000, 1, 1, 1), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = len(images) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_get_parameters(self):
aug = iaa.TotalDropout(p=0.0)
params = aug.get_parameters()
assert params[0] is aug.p
def test_unusual_channel_numbers(self):
shapes = [
(5, 1, 1, 4),
(5, 1, 1, 5),
(5, 1, 1, 512),
(5, 1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.zeros(shape, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert np.all(images_aug == 0)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == shape
def test_zero_sized_axes(self):
shapes = [
(5, 0, 0),
(5, 0, 1),
(5, 1, 0),
(5, 0, 1, 0),
(5, 1, 0, 0),
(5, 0, 1, 1),
(5, 1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.full(shape, 255, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == images.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 0
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0) or value == 0:
assert np.sum(images_aug == 0) == 5*3
else:
assert np.sum(images_aug == value) == 5*3
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0):
assert np.sum(_isclose(images_aug, 0.0)) == 5*3
else:
assert (
np.sum(_isclose(images_aug, np.float128(value)))
== 5*3)
def test_pickleable(self):
aug = iaa.TotalDropout(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=30, shape=(4, 4, 2))
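# The following test class covers iaa.Multiply: mul=1.0 is a no-op, factors
# above/below 1.0 scale intensities accordingly, tuple and stochastic factors
# are re-sampled per call, per_channel sampling, parameter validation,
# get_parameters(), heatmap invariance and behaviour for bool/int/float dtypes.
# Minimal usage sketch (illustrative values, not taken from these tests):
#   aug = iaa.Multiply(mul=(0.8, 1.2), per_channel=0.5)
#   image_aug = aug(image=image)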
class TestMultiply(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Multiply(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Multiply(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_per_channel(self):
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=True)
observed = aug.augment_image(np.ones((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 2 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Multiply(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Multiply(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Multiply(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Multiply(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1.2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(1.2 * int(center_value)))
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# non-uint8 dtypes currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# non-uint8 dtypes currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
# non-uint8 dtypes currently don't increase the itemsize
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.Multiply(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.Multiply(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# tolerances of -100 - 1e-2 and 100 + 1e-2 are not enough for float16; they had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.Multiply((0.5, 1.5), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
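# The following test class covers iaa.MultiplyElementwise, the per-pixel
# variant of Multiply: constant factors behave like Multiply, sampled factors
# differ between pixels (and between channels when per_channel is enabled),
# parameter validation, get_parameters(), heatmap invariance and dtype handling.
# Minimal usage sketch (illustrative values, not taken from these tests):
#   aug = iaa.MultiplyElementwise(mul=(0.5, 1.5), per_channel=0.5)
#   image_aug = aug(image=image)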
class TestMultiplyElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = iaa.MultiplyElementwise(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0.5, 1.5))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.95 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
assert observed.shape == (100, 100, 3)
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.MultiplyElementwise(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.MultiplyElementwise(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), int(center_value), dtype=dtype)
# aug = iaa.MultiplyElementwise(1.2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == int(1.2 * int(center_value)))
# deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == min_value)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10.0, dtype=dtype)
# aug = iaa.MultiplyElementwise(2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestReplaceElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mask_is_always_zero(self):
        # no replace, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=0, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mask_is_always_one(self):
# replace at 100 percent prob., should change everything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_mask_is_stochastic_parameter(self):
# replace half
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
img = np.ones((100, 100, 1), dtype=np.uint8)
nb_iterations = 100
nb_diff_all = 0
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
nb_diff = np.sum(img != observed)
nb_diff_all += nb_diff
p = nb_diff_all / (nb_iterations * 100 * 100)
assert 0.45 <= p <= 0.55
def test_mask_is_list(self):
# mask is list
aug = iaa.ReplaceElementwise(mask=[0.2, 0.7], replacement=1)
img = np.zeros((20, 20, 1), dtype=np.uint8)
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_image(img)
p = np.mean(observed)
if 0.1 < p < 0.3:
seen[0] += 1
elif 0.6 < p < 0.8:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
aug_det = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_replacement_is_stochastic_parameter(self):
# different replacements
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Choice([100, 200]))
img = np.zeros((1000, 1000, 1), dtype=np.uint8)
img100 = img + 100
img200 = img + 200
observed = aug.augment_image(img)
nb_diff_100 = np.sum(img100 != observed)
nb_diff_200 = np.sum(img200 != observed)
p100 = nb_diff_100 / (1000 * 1000)
p200 = nb_diff_200 / (1000 * 1000)
assert 0.45 <= p100 <= 0.55
assert 0.45 <= p200 <= 0.55
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask="test", replacement=1)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask=1, replacement=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.ReplaceElementwise(mask=0.5, replacement=2, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert 0.5 - 1e-6 < params[0].p.value < 0.5 + 1e-6
assert params[1].value == 2
assert params[2].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.5)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.7)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.2)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=2)
image = np.full((3, 3), 1, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 2)
# deterministic stochastic parameters are by default int32 for
# any integer value and hence cannot cover the full uint32 value
# range
if dtype.name != "uint32":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 2
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32, np.float64]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
atol = 1e-3*max_value if dtype == np.float16 else 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1.0)
image = np.full((3, 3), 0.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 1.0)
aug = iaa.ReplaceElementwise(mask=1, replacement=2.0)
image = np.full((3, 3), 1.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 2.0)
# deterministic stochastic parameters are by default float32 for
# any float value and hence cannot cover the full float64 value
# range
if dtype.name != "float64":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1], atol=0.01)
def test_pickleable(self):
aug = iaa.ReplaceElementwise(mask=0.5, replacement=(0, 255),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
# not more tests necessary here as SaltAndPepper is just a tiny wrapper around
# ReplaceElementwise
class TestSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.SaltAndPepper(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
aug2 = iaa.CoarseSaltAndPepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSaltAndPepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSaltAndPepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
                density = nb_seen / len(ps)
                # p is drawn uniformly from (0.0, 1.0), so each of the nb_bins bins
                # should contain roughly 1/nb_bins of the samples
                assert 1.0/nb_bins - tolerance < density < 1.0/nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSaltAndPepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# not more tests necessary here as Salt is just a tiny wrapper around
# ReplaceElementwise
class TestSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
# Salt() occasionally replaces with 127, which probably should be the center-point here anyways
assert np.all(observed >= 127)
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper == 0
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.Salt(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSalt(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSalt(p=0.5, size_px=100)
aug2 = iaa.CoarseSalt(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
        assert 0.4 < np.mean(ps2) < 0.6
from scipy.spatial import SphericalVoronoi, ConvexHull, cKDTree, Delaunay
import numpy as np
import networkx as nx
import warnings
import matplotlib.pyplot as plt  # needed by plot_Spherical_Voronoi / scatter_on_sphere below
from matplotlib.tri.triangulation import Triangulation
from operator import itemgetter
from scipy.optimize import linear_sum_assignment
def matchKITsystems(chanposA,chanposB):
"""
returns idxes such that
chanposA==chanposB[matching]
returns:
siteBchannels -> one to one match
correspondings -> greedy match not one-to-one
"""
sensFeature = 'chanpos'
print('Distance between channels without positional matching:\n',np.sqrt(np.sum((chanposB-chanposA)**2,axis=-1)).sum())
ch_idxA = np.arange(160)
ch_idxB = np.arange(160)
correspondings = np.zeros(160)
comp_dist = []
for j in ch_idxA:
dists=[]
for i in ch_idxB:
distance = np.sum((chanposA[j]-chanposB[i])**2)
dists+=[np.sqrt(distance)]
min_dist = np.min(dists)
arg_min = np.argmin(dists)
correspondings[j]=arg_min
comp_dist+=[dists]
correspondings = np.array(correspondings).astype('int')
print('Greedy min distance matching is not a one-to-one matching: ')
print('channel correspondings: (Position denotes the ch_idx in A and value denotes the ch_idx in B)\n',correspondings)
print('Greedy matching has non unique correspondences: len(np.unique(correspondings))!=160 but:',len(np.unique(correspondings)))
print('Distance between channels with greedy matching: ', np.linalg.norm(chanposA-chanposB[correspondings]))
comp_dist = np.array(comp_dist).reshape(160,160)
print('Create one-to-one matching with bipartite node matching (Hungarian Algorithm).')
siteAchannels, siteBchannels = linear_sum_assignment(comp_dist)
print('Distance between channels with positional matching:\n',np.sum(comp_dist[siteAchannels,siteBchannels]))
return siteBchannels,correspondings
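# Illustrative sketch (not part of the original module): how matchKITsystems might be
# called. The channel positions below are synthetic stand-ins for the 160-channel KIT
# 'chanpos' arrays, and the permutation/noise are made up for the demo.
def _demo_match_kit_systems():
    rng = np.random.default_rng(0)
    chanposA = rng.normal(size=(160, 3))             # fake sensor positions, site A
    perm = rng.permutation(160)                      # unknown channel reordering
    chanposB = chanposA[perm] + rng.normal(scale=1e-3, size=(160, 3))  # site B, slightly noisy
    matching, greedy = matchKITsystems(chanposA, chanposB)
    # after matching, channel i of site A corresponds to channel matching[i] of site B
    assert np.allclose(chanposA, chanposB[matching], atol=1e-2)
    return matching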
def edge_attr_dist(graph):
dist_keys = []
dist_vals = []
for n1 in np.arange(160):
for n2 in np.arange(n1+1,160):
dist_keys+=[(n1,n2)]
dist_vals+=[np.sqrt(np.sum((graph.nodes[n1]['sensloc']-graph.nodes[n2]['sensloc'])**2))]
return dict(zip(dist_keys,dist_vals))
def get_sensorGraph(triangles):
G = nx.Graph()
for path in triangles:
path = np.append(path, path[0])
nx.add_path(G, path)
return G
def set_node_attribute(graph,pts,attr_name,copy=False):
if copy:
raise NotImplementedError("copy has not been implemented")
nx.set_node_attributes(graph,dict(zip(np.arange(len(pts)),pts)),name=attr_name)
def set_edge_attribute(graph,edge_attr,attr_name,copy=False):
if copy:
raise NotImplementedError("copy has not been implemented")
nx.set_edge_attributes(graph,edge_attr,name=attr_name)
def triangles_spherical(points,radius,center,threshold=1e-6):
"""
params:
points: sensor locations projected on a sphere
copy pasted from:
https://github.com/scipy/scipy/blob/v1.5.4/scipy/spatial/_spherical_voronoi.py#L37-L345
"""
if radius is None:
radius = 1.
warnings.warn('`radius` is `None`. '
'This will raise an error in a future version. '
'Please provide a floating point number '
'(i.e. `radius=1`).',
DeprecationWarning)
radius = float(radius)
points = np.array(points).astype(np.double)
_dim = len(points[0])
if center is None:
center = np.zeros(_dim)
else:
center = np.array(center, dtype=float)
# test degenerate input
_rank = np.linalg.matrix_rank(points - points[0],
tol=threshold * radius)
if _rank < _dim:
raise ValueError("Rank of input points must be at least {0}".format(_dim))
if cKDTree(points).query_pairs(threshold * radius):
raise ValueError("Duplicate generators present.")
radii = np.linalg.norm(points - center, axis=1)
max_discrepancy = np.abs(radii - radius).max()
if max_discrepancy >= threshold * radius:
raise ValueError("Radius inconsistent with generators.")
conv = ConvexHull(points)
# get circumcenters of Convex Hull triangles from facet equations
# for 3D input circumcenters will have shape: (2N-4, 3)
vertices = radius * conv.equations[:, :-1] + center
simplices = conv.simplices
return simplices
def triangles_xy_surface(pts):
tri = Triangulation(pts[:,0],pts[:,1])
return tri.get_masked_triangles()
def triangles_xyz_Delaunay(pts):
return Delaunay(pts).simplices
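# Illustrative pipeline sketch (an assumption, not from the original code): wire the
# helpers above into a small routine that turns sensor locations into a neighborhood
# graph. `sensloc` is expected to be a (160, 3) array, matching the node-attribute
# name used by the distance helpers further below.
def _demo_build_sensor_graph(sensloc):
    triangles = triangles_xy_surface(sensloc)                  # triangulate in the xy-plane
    graph = get_sensorGraph(triangles)                         # edges from triangle sides
    set_node_attribute(graph, sensloc, 'sensloc')              # store positions on the nodes
    set_edge_attribute(graph, edge_attr_dist(graph), 'dist')   # annotate edges with distances
    return graph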
def plot_Spherical_Voronoi(R,radius,center):
sv = SphericalVoronoi(R, radius, center)
# sort vertices (optional, helpful for plotting)
sv.sort_vertices_of_regions()
t_vals = np.linspace(0, 1, 2000)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# plot the unit sphere for reference (optional)
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
    # shift the reference sphere to `center` (the original subtracted the x/y offsets)
    x = np.outer(np.cos(u), np.sin(v))*radius + center[0]
    y = np.outer(np.sin(u), np.sin(v))*radius + center[1]
    z = np.outer(np.ones(np.size(u)), np.cos(v))*radius + center[2]
ax.plot_surface(x, y, z, color='y', alpha=0.1)
ax.scatter(R[:,0],R[:,1],R[:,2])
# plot Voronoi vertices
#ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
# c='g')
# indicate Voronoi regions (as Euclidean polygons)
for region in sv.regions:
n = len(region)
for i in range(n):
start = sv.vertices[region][i]
end = sv.vertices[region][(i + 1) % n]
#result = geometric_slerp(start, end, t_vals) # spherical interpolated edges
result = np.stack([start,end]) # direct edges
ax.plot(result[..., 0],
result[..., 1],
result[..., 2],
c='k')
#ax.plot(start[])
ax.azim = 10
ax.elev = 40
_ = ax.set_xticks([])
_ = ax.set_yticks([])
_ = ax.set_zticks([])
fig.set_size_inches(4, 4)
plt.show()
def project_to_sphere(pts,radius,center):
"""
params:
something like
radius = 5
center = np.array([0,0,-10])
https://stackoverflow.com/questions/9604132/how-to-project-a-point-on-to-a-sphere
For the simplest projection (along the line connecting the point to the center of the sphere):
Write the point in a coordinate system centered at the center of the sphere (x0,y0,z0):
P = (x',y',z') = (x - x0, y - y0, z - z0)
Compute the length of this vector:
|P| = sqrt(x'^2 + y'^2 + z'^2)
Scale the vector so that it has length equal to the radius of the sphere:
Q = (radius/|P|)*P
And change back to your original coordinate system to get the projection:
R = Q + (x0,y0,z0)
"""
P = pts-center
Pnorm = np.expand_dims(np.linalg.norm(P,axis=-1),axis=-1)
Q = (P*radius/Pnorm)
R = Q + center
return R
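# Small check (illustrative; the radius/center values are the ones suggested in the
# docstring above): after projection every point should sit exactly `radius` away
# from `center`.
def _demo_project_to_sphere(pts):
    radius = 5
    center = np.array([0, 0, -10])
    R = project_to_sphere(pts, radius, center)
    assert np.allclose(np.linalg.norm(R - center, axis=-1), radius)
    return R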
def scatter_on_sphere(R,radius,center):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# plot the unit sphere for reference (optional)
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
    # shift the reference sphere to `center` (the original subtracted the x/y offsets)
    x = np.outer(np.cos(u), np.sin(v))*radius + center[0]
    y = np.outer(np.sin(u), np.sin(v))*radius + center[1]
    z = np.outer(np.ones(np.size(u)), np.cos(v))*radius + center[2]
ax.plot_surface(x, y, z, color='y', alpha=0.1)
ax.scatter(R[:,0],R[:,1],R[:,2])
def befriend_sensors(graph,radius):
"""
The 2D triangulation is sometimes bad.
If two nodes don't have an edge, but have a distance < radius -> make an edge
params:
--------
radius:
"""
pass
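# befriend_sensors above is only a stub; the sketch below is one possible implementation
# of the behaviour described in its docstring (an assumption, not original code):
# connect any two nodes that are closer than `radius` but not yet joined by an edge.
def _befriend_sensors_sketch(graph, radius):
    nodes = list(graph.nodes)
    for i, n1 in enumerate(nodes):
        for n2 in nodes[i + 1:]:
            if graph.has_edge(n1, n2):
                continue
            dist = np.linalg.norm(graph.nodes[n1]['sensloc'] - graph.nodes[n2]['sensloc'])
            if dist < radius:
                graph.add_edge(n1, n2)
    return graph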
def get_distance_distribution(graph):
distances = []
for node in np.arange(160):
neighbors = np.array(list(graph.neighbors(node)))
distances += [from_graph_get_neighbor_distances(graph,node,neighbors)]
return distances
def _fit_data_format(node_pos,neighbor_positions):
"""
node_pos, array shape [1,3] or [3]
neighbor_positions, array w/ shape [N_neighbors,3]
returns: array w/ shape [N_neighbors,3]
"""
node_pos = np.array(node_pos)
neighbor_positions = np.array(neighbor_positions)
if len(node_pos.shape)==1:
node_pos = np.expand_dims(node_pos,axis=0)
assert node_pos.shape[1]==neighbor_positions.shape[1]
return node_pos,neighbor_positions
def get_distance_to_neighbors(node_pos,neighbor_positions):
"""
node_pos, array shape [1,3] or [3]
neighbor_positions, array shape [N_neighbors,3]
-----
returns: array w/ shape [N_neighbors]
"""
node_pos,neighbor_positions = _fit_data_format(node_pos,neighbor_positions)
return np.sqrt(np.sum(get_displacement_to_neighbors(node_pos,neighbor_positions)**2,axis=-1))
def get_displacement_to_neighbors(node_pos,neighbor_positions):
"""
returns: array [N_neighbors,3]
"""
node_pos,neighbor_positions = _fit_data_format(node_pos,neighbor_positions)
return node_pos-neighbor_positions
def from_graph_get_displacement_to_neighbors(graph,node,neighbors=[]):
if len(neighbors)==0:
neighbors = np.array(list(graph.neighbors(node)))
node_pos = graph.nodes[node]['sensloc']
# itemgetter needs unpacked elements
neighbor_positions = np.array(itemgetter(*neighbors)(graph.nodes('sensloc')))
return get_displacement_to_neighbors(node_pos,neighbor_positions)
def from_graph_get_neighbor_distances(graph,node,neighbors=[]):
if len(neighbors)==0:
neighbors = np.array(list(graph.neighbors(node)))
node_pos = graph.nodes[node]['sensloc']
# itemgetter needs unpacked elements
neighbor_positions = np.array(itemgetter(*neighbors)(graph.nodes('sensloc')))
distances = get_distance_to_neighbors(node_pos,neighbor_positions)
return distances
def remove_neighbor(graph,node,neighbors_to_remove,copy=True):
"""
removes an edge
"""
if len(neighbors_to_remove)==0:
return graph
if copy:
reduced_graph = graph.copy()
for neighbor in neighbors_to_remove:
reduced_graph.remove_edge(node,neighbor)
return reduced_graph
    else:
        # modify the graph in place (the original referenced the undefined `reduced_graph` here)
        for neighbor in neighbors_to_remove:
            graph.remove_edge(node, neighbor)
        return graph
def remove_long_distance_neighbor(graph,node,th_dist,copy=True):
"""
graph: networkx object, with node_attribute 'sensloc'
param node: int, idx of the node
param th_dist: threshold, neighbors that are further away get chopped off
-------
return: reduced_graph
"""
neighbors = np.array(list(graph.neighbors(node)))
distances = from_graph_get_neighbor_distances(graph,node,neighbors)
neighbors_to_remove = neighbors[np.where(distances>th_dist)]
return remove_neighbor(graph,node,neighbors_to_remove,copy)
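# Usage sketch (assumption): prune implausibly long edges around every node. The
# default threshold of 0.07 is a made-up value; in practice it would be chosen from
# the distribution returned by get_distance_distribution().
def _demo_prune_long_edges(graph, th_dist=0.07):
    pruned = graph
    for node in list(graph.nodes):
        pruned = remove_long_distance_neighbor(pruned, node, th_dist, copy=True)
    return pruned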
def get_edge_similarity(node_pos,neighbor_positions):
"""
useful for finding approximate colinear neighbors.
"""
displacements = get_displacement_to_neighbors(node_pos,neighbor_positions)
n_neighbors = neighbor_positions.shape[0]
# Quick and dirty, can reduce computation by factor 2.
similarity = []
for d1 in displacements:
for d2 in displacements:
similarity+=[np.sum(d1*d2)/(np.linalg.norm(d1)*np.linalg.norm(d2))]
similarity = np.array(similarity).reshape(n_neighbors,n_neighbors)
return similarity
def from_graph_get_edge_similarity(graph,node,neighbors=[]):
"""
useful for finding approximate colinear neighbors.
"""
if len(neighbors)==0:
neighbors = np.array(list(graph.neighbors(node)))
node_pos = graph.nodes[node]['sensloc']
# itemgetter needs unpacked elements
neighbor_positions = np.array(itemgetter(*neighbors)(graph.nodes('sensloc')))
similarity = get_edge_similarity(node_pos,neighbor_positions)
return similarity
def remove_colinear_neighbor(graph,node,th_max_colinearity = 0.99,copy=True):
"""
Remove neighbor if they have almost overlapping edges. (Longer edges will be removed.)
------
params:
graph: networkx object, with node_attribute 'sensloc'
param node: int, idx of the central node
param th_max_colinearity: threshold, of two neighbors that have edges with high colinearity
the neighbor that is further away is removed.
------
details:
A similarity matrix is computed, where the entries describe how much
the direction of node to neighbor_i is similiar to the direction of node to neighbor_j.
The neighbors which h
-------
return: reduced_graph
"""
    neighbors = np.array(list(graph.neighbors(node)))
    similarity = from_graph_get_edge_similarity(graph, node, neighbors)
    node_pairs_idx = np.stack(np.where(np.tril(similarity, -1) > th_max_colinearity), axis=-1)
    # NOTE: the source is truncated at this point; the remainder is a plausible completion
    # (an assumption) based on the docstring: of each nearly colinear pair, drop the
    # neighbor that is further away from the central node.
    distances = from_graph_get_neighbor_distances(graph, node, neighbors)
    neighbors_to_remove = []
    for i, j in node_pairs_idx:
        neighbors_to_remove.append(neighbors[i] if distances[i] > distances[j] else neighbors[j])
    return remove_neighbor(graph, node, list(set(neighbors_to_remove)), copy)